diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index dac76342a9fdb48247cc171cfdf37fd6b60600ba..45d4056c6fc521c255b8fd781f50249b4e714543 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -14,28 +14,26 @@
 
 # stages of the cicd pipeline
 stages:
-  - dependencies
+  #- dependencies
   - build
-  - test
   - unit_test
-  - integ_test
-  - deploy
-  - funct_test
+  #- deploy
+  #- end2end_test
 
 # include the individual .gitlab-ci.yml of each micro-service
 include:
-  - local: '/manifests/.gitlab-ci.yml'
+  #- local: '/manifests/.gitlab-ci.yml'
   - local: '/src/monitoring/.gitlab-ci.yml'
   - local: '/src/compute/.gitlab-ci.yml'
   - local: '/src/context/.gitlab-ci.yml'
   - local: '/src/device/.gitlab-ci.yml'
   - local: '/src/service/.gitlab-ci.yml'
-  - local: '/src/dbscanserving/.gitlab-ci.yml'
-  - local: '/src/opticalattackmitigator/.gitlab-ci.yml'
-  - local: '/src/opticalcentralizedattackdetector/.gitlab-ci.yml'
+  #- local: '/src/dbscanserving/.gitlab-ci.yml'
+  #- local: '/src/opticalattackmitigator/.gitlab-ci.yml'
+  #- local: '/src/opticalcentralizedattackdetector/.gitlab-ci.yml'
   - local: '/src/automation/.gitlab-ci.yml'
   - local: '/src/policy/.gitlab-ci.yml'
-  - local: '/src/webui/.gitlab-ci.yml'
+  #- local: '/src/webui/.gitlab-ci.yml'
   #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml'
   #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml'
   #- local: '/src/l3_attackmitigator/.gitlab-ci.yml'
@@ -43,3 +41,4 @@ include:
   #- local: '/src/interdomain/.gitlab-ci.yml'
   - local: '/src/pathcomp/.gitlab-ci.yml'
   #- local: '/src/dlt/.gitlab-ci.yml'
+  - local: '/src/load_generator/.gitlab-ci.yml'
diff --git a/INSTALL.md b/INSTALL.md
deleted file mode 100644
index 670af487313498ad60f8c0fc89029218efe29405..0000000000000000000000000000000000000000
--- a/INSTALL.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# TeraFlow OS SDN Controller Installation Instructions 
-Assuming you have a running Kubernetes deployment installed following the instructions provided in [Wiki: Installing Kubernetes on your Linux machine](https://gitlab.com/teraflow-h2020/controller/-/wikis/Installing-Kubernetes-on-your-Linux-machine), the following instructions will let you deploy TeraFlow OS SDN Controller in your local Kubernetes environment.
-
-Then, follow the instructions in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance) to deploy your instance of TeraFlow OS.
diff --git a/README.md b/README.md
index 0336b9f6cdb9562ccff27d73f058d6293604de6b..88af0327237d483a113dc03dce6b67e7c81ba7c0 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,12 @@
 # TeraFlowSDN Controller
 
-[Teraflow H2020 project](https://teraflow-h2020.eu/) - Secured autonomic traffic management for a Tera of SDN Flows
+[ETSI Open Source Group for TeraFlowSDN](https://tfs.etsi.org/)
 
-Branch "master" : [![pipeline status](https://gitlab.com/teraflow-h2020/controller/badges/master/pipeline.svg)](https://gitlab.com/teraflow-h2020/controller/-/commits/master) [![coverage report](https://gitlab.com/teraflow-h2020/controller/badges/master/coverage.svg)](https://gitlab.com/teraflow-h2020/controller/-/commits/master)
+Formerly, the [Teraflow H2020 project](https://teraflow-h2020.eu/) - Secured autonomic traffic management for a Tera of SDN Flows
 
-Branch "develop" : [![pipeline status](https://gitlab.com/teraflow-h2020/controller/badges/develop/pipeline.svg)](https://gitlab.com/teraflow-h2020/controller/-/commits/develop) [![coverage report](https://gitlab.com/teraflow-h2020/controller/badges/develop/coverage.svg)](https://gitlab.com/teraflow-h2020/controller/-/commits/develop)
+Branch "master" : [![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/master/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/master/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/master)
+
+Branch "develop" : [![pipeline status](https://labs.etsi.org/rep/tfs/controller/badges/develop/pipeline.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop) [![coverage report](https://labs.etsi.org/rep/tfs/controller/badges/develop/coverage.svg)](https://labs.etsi.org/rep/tfs/controller/-/commits/develop)
 
 # Installation Instructions
-For devel and upcoming release 2.0, we have prepared the following tutorial: [TeraFlowSDN tutorial](https://gitlab.com/teraflow-h2020/controller/-/tree/develop/tutorial).
+For the devel branch and the upcoming release 2.0, check the Wiki pages: [TeraFlowSDN Wiki](https://labs.etsi.org/rep/tfs/controller/-/wikis/home).
diff --git a/common_requirements.in b/common_requirements.in
index 75610976bc4a17f0fdd93324333234c22b2aa2bd..cb06c3b653e75315e3700d8f1f434a5e5435610f 100644
--- a/common_requirements.in
+++ b/common_requirements.in
@@ -16,8 +16,10 @@ coverage==6.3
 grpcio==1.47.*
 grpcio-health-checking==1.47.*
 grpcio-tools==1.47.*
+prettytable==3.5.0
 prometheus-client==0.13.0
 protobuf==3.20.*
 pytest==6.2.5
 pytest-benchmark==3.4.1
 python-dateutil==2.8.2
+pytest-depends==1.0.1
diff --git a/coverage/.coveragerc.template b/coverage/.coveragerc.template
index e5e634c2c256103b1796d9309a3433ae9f248e70..8863d4d6ea58b967949a888d85af02d979244420 100644
--- a/coverage/.coveragerc.template
+++ b/coverage/.coveragerc.template
@@ -1,5 +1,5 @@
 [run]
-data_file = ~/teraflow/controller/coverage/.coverage
+data_file = ~/tfs-ctrl/coverage/.coverage
 source = .
 omit =
     */proto/*
@@ -12,7 +12,7 @@ exclude_lines =
     raise\ NotImplementedError
 
 [html]
-directory = ~/teraflow/controller/coverage/html_report
+directory = ~/tfs-ctrl/coverage/html_report
 
 [xml]
-output = ~/teraflow/controller/coverage/report.xml
+output = ~/tfs-ctrl/coverage/report.xml
diff --git a/deploy/all.sh b/deploy/all.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c6da23366d3cd74b63fde87c3f24960c3bc2999b
--- /dev/null
+++ b/deploy/all.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the URL of the Docker registry where the images will be uploaded to.
+# By default, the internal MicroK8s registry is assumed.
+export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
+
+# If not already set, set the list of components you want to build images for, and deploy.
+# By default, only basic components are deployed
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service compute webui"}
+
+# If not already set, set the tag you want to use for your images.
+export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+# If not already set, set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
+
+# If not already set, set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
+
+# If not already set, disable the skip-build flag.
+# If TFS_SKIP_BUILD is "YES", the container images are not rebuilt, retagged, nor repushed, and the existing ones are used.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
+
+# If not already set, set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
+
+# If not already set, set the database username to be used by Context.
+export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"}
+
+# If not already set, set the database user's password to be used by Context.
+export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
+
+# If not already set, set the database name to be used by Context.
+export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
+
+# If not already set, set the name of the secret where CockroachDB data and credentials will be stored.
+export CRDB_SECRET_NAME=${CRDB_SECRET_NAME:-"crdb-data"}
+
+# If not already set, set the namespace where the secret containing CockroachDB data and credentials will be stored.
+export CRDB_SECRET_NAMESPACE=${CRDB_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}}
+
+# If not already set, set the CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
+# - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for
+#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
+# - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster
+#   with 3 replicas and version v22.2 (set by default) will be deployed. It is convenient for production and
+#   provides scalability features. If you are deploying for production, also read the following link providing
+#   details on deploying CockroachDB for production environments:
+#   Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html
+export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
+
+# If not already set, disable the flag for dropping the database if it exists.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE DATABASE INFORMATION!
+# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed to by variable CRDB_DATABASE will be dropped while
+# checking/deploying CockroachDB.
+export CRDB_DROP_DATABASE_IF_EXISTS=${CRDB_DROP_DATABASE_IF_EXISTS:-""}
+
+# If not already set, disable the flag for re-deploying CockroachDB from scratch.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE DATABASE INFORMATION!
+# WARNING: THE REDEPLOY MIGHT TAKE A FEW MINUTES TO COMPLETE GRACEFULLY IN CLUSTER MODE
+# If CRDB_REDEPLOY is "YES", the database will be dropped while checking/deploying CockroachDB.
+export CRDB_REDEPLOY=${CRDB_REDEPLOY:-""}
+
+# If not already set, set the namespace where NATS will be deployed.
+export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
+
+# If not already set, set the name of the secret where NATS data and credentials will be stored.
+export NATS_SECRET_NAME=${NATS_SECRET_NAME:-"nats-data"}
+
+# If not already set, set the namespace where the secret containing NATS data and credentials will be stored.
+export NATS_SECRET_NAMESPACE=${NATS_SECRET_NAMESPACE:-${TFS_K8S_NAMESPACE}}
+
+# If not already set, disable the flag for re-deploying NATS from scratch.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
+# If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
+export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
+
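+# The settings above can be overridden from the shell before invoking this script.
+# A minimal sketch of a customized invocation (hypothetical values, for illustration only):
+#   export TFS_COMPONENTS="context device service webui"
+#   export TFS_K8S_NAMESPACE="tfs-dev"
+#   ./deploy/all.sh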
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Deploy CockroachDB
+./deploy/crdb.sh
+
+# Deploy NATS
+./deploy/nats.sh
+
+# Deploy TFS
+./deploy/tfs.sh
+
+# Show deploy summary
+./deploy/show.sh
+
+echo "Done!"
diff --git a/deploy_component.sh b/deploy/component.sh
similarity index 93%
rename from deploy_component.sh
rename to deploy/component.sh
index a4cf6184c83ef026562abe8e084430bba3ead9c8..443bee6016b48ccefe93cc5aaf95f7c92f9ee725 100755
--- a/deploy_component.sh
+++ b/deploy/component.sh
@@ -18,10 +18,12 @@
 # Read deployment settings
 ########################################################################################################################
 
-# If not already set, set the URL of your local Docker registry where the images will be uploaded to.
-# Leave it blank if you do not want to use any Docker registry.
-export TFS_REGISTRY_IMAGE=${TFS_REGISTRY_IMAGE:-""}
-#export TFS_REGISTRY_IMAGE="http://my-container-registry.local/"
+# If not already set, set the URL of the Docker registry where the images will be uploaded to.
+# By default, the internal MicroK8s registry is assumed.
+export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
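+# This script takes the component(s) to (re)build and (re)deploy as its first argument.
+# A hypothetical invocation, for illustration only:
+#   ./deploy/component.sh "device"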
 
 TFS_COMPONENTS=$1
 
@@ -42,7 +44,7 @@ export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
 ########################################################################################################################
 
 # Constants
-GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
+GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
 TMP_FOLDER="./tmp"
 
 # Create a tmp folder for files modified during the deployment
@@ -55,7 +57,7 @@ ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
 for COMPONENT in $TFS_COMPONENTS; do
     echo "Processing '$COMPONENT' component..."
     IMAGE_NAME="$COMPONENT:$TFS_IMAGE_TAG"
-    IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
+    IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
 
     echo "  Building Docker image..."
     BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
@@ -74,8 +76,8 @@ for COMPONENT in $TFS_COMPONENTS; do
         docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
     fi
 
-    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
-        echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGE'..."
+    if [ -n "$TFS_REGISTRY_IMAGES" ]; then
+        echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
 
         if [ "$COMPONENT" == "pathcomp" ]; then
             TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
@@ -102,7 +104,7 @@ for COMPONENT in $TFS_COMPONENTS; do
     MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
     cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
 
-    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
+    if [ -n "$TFS_REGISTRY_IMAGES" ]; then
         # Registry is set
         if [ "$COMPONENT" == "pathcomp" ]; then
             VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3)
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
new file mode 100755
index 0000000000000000000000000000000000000000..76aa0737087cb9bfe28b4a42e6ebd58b5b784b7e
--- /dev/null
+++ b/deploy/crdb.sh
@@ -0,0 +1,363 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
+
+# If not already set, set the database username to be used by Context.
+export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"}
+
+# If not already set, set the database user's password to be used by Context.
+export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
+
+# If not already set, set the database name to be used by Context.
+export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
+
+# If not already set, set the CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
+# - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for
+#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
+# - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster
+#   with 3 replicas and version v22.2 (set by default) will be deployed. It is convenient for production and
+#   provides scalability features. If you are deploying for production, also read the following link providing
+#   details on deploying CockroachDB for production environments:
+#   Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html
+export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
+
+# If not already set, disable the flag for dropping the database if it exists.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE DATABASE INFORMATION!
+# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed to by variable CRDB_DATABASE will be dropped while
+# checking/deploying CockroachDB.
+export CRDB_DROP_DATABASE_IF_EXISTS=${CRDB_DROP_DATABASE_IF_EXISTS:-""}
+
+# If not already set, disable the flag for re-deploying CockroachDB from scratch.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE DATABASE INFORMATION!
+# WARNING: THE REDEPLOY MIGHT TAKE A FEW MINUTES TO COMPLETE GRACEFULLY IN CLUSTER MODE
+# If CRDB_REDEPLOY is "YES", the database will be dropped while checking/deploying CockroachDB.
+export CRDB_REDEPLOY=${CRDB_REDEPLOY:-""}
+
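+# A minimal sketch of a standalone invocation (hypothetical values, for illustration only).
+# WARNING: CRDB_REDEPLOY wipes any existing database content!
+#   export CRDB_DEPLOY_MODE="cluster"
+#   export CRDB_REDEPLOY="YES"
+#   ./deploy/crdb.sh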
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+TMP_FOLDER="./tmp"
+CRDB_MANIFESTS_PATH="manifests/cockroachdb"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+CRDB_LOG_FILE="$TMP_LOGS_FOLDER/crdb_deploy.log"
+mkdir -p $TMP_MANIFESTS_FOLDER $TMP_LOGS_FOLDER
+
+function crdb_deploy_single() {
+    echo "CockroachDB Namespace"
+    echo ">>> Create CockroachDB Namespace (if missing)"
+    kubectl create namespace ${CRDB_NAMESPACE}
+    echo
+
+    echo "CockroachDB (single-node)"
+    echo ">>> Checking if CockroachDB is deployed..."
+    if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
+        echo ">>> CockroachDB is present; skipping step."
+    else
+        echo ">>> Deploy CockroachDB"
+        cp "${CRDB_MANIFESTS_PATH}/single-node.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml"
+        sed -i "s/%CRDB_DATABASE%/${CRDB_DATABASE}/g" "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml"
+        sed -i "s/%CRDB_USERNAME%/${CRDB_USERNAME}/g" "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml"
+        sed -i "s/%CRDB_PASSWORD%/${CRDB_PASSWORD}/g" "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml"
+        kubectl apply --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml"
+
+        echo ">>> Waiting CockroachDB statefulset to be created..."
+        while ! kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+
+        # Wait for statefulset condition "Available=True" does not work
+        # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error:
+        #   "error: readyReplicas is not found"
+        # Workaround: Check the pods are ready
+        #echo ">>> CockroachDB statefulset created. Waiting for readiness condition..."
+        #kubectl wait --namespace  ${CRDB_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/cockroachdb
+        #kubectl wait --namespace ${CRDB_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \
+        #    statefulset/cockroachdb
+        echo ">>> CockroachDB statefulset created. Waiting CockroachDB pods to be created..."
+        while ! kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-0 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-0
+    fi
+    echo
+
+    echo "CockroachDB Port Mapping"
+    echo ">>> Expose CockroachDB SQL port (26257)"
+    CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+    PATCH='{"data": {"'${CRDB_SQL_PORT}'": "'${CRDB_NAMESPACE}'/cockroachdb-public:'${CRDB_SQL_PORT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${CRDB_SQL_PORT}', "hostPort": '${CRDB_SQL_PORT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+
+    echo ">>> Expose CockroachDB HTTP Mgmt GUI port (8080)"
+    CRDB_GUI_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+    PATCH='{"data": {"'${CRDB_GUI_PORT}'": "'${CRDB_NAMESPACE}'/cockroachdb-public:'${CRDB_GUI_PORT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${CRDB_GUI_PORT}', "hostPort": '${CRDB_GUI_PORT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+}
+
+function crdb_undeploy_single() {
+    echo "CockroachDB"
+    echo ">>> Checking if CockroachDB is deployed..."
+    if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
+        echo ">>> Undeploy CockroachDB"
+        kubectl delete --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_single_node.yaml" --ignore-not-found
+    else
+        echo ">>> CockroachDB is not present; skipping step."
+    fi
+    echo
+
+    echo "CockroachDB Namespace"
+    echo ">>> Delete CockroachDB Namespace (if exists)"
+    echo "NOTE: this step might take few minutes to complete!"
+    kubectl delete namespace ${CRDB_NAMESPACE} --ignore-not-found
+    echo
+}
+
+function crdb_drop_database_single() {
+    echo "Drop database if exists"
+    # Retrieve the SQL port here: this function may run before crdb_deploy_single sets CRDB_SQL_PORT.
+    CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+    CRDB_CLIENT_URL="postgresql://${CRDB_USERNAME}:${CRDB_PASSWORD}@cockroachdb-0:${CRDB_SQL_PORT}/defaultdb?sslmode=require"
+    kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
+        ./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} \
+        --execute "DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
+    echo
+}
+
+function crdb_deploy_cluster() {
+    echo "Cockroach Operator CRDs"
+    echo ">>> Apply Cockroach Operator CRDs (if they are missing)"
+    cp "${CRDB_MANIFESTS_PATH}/crds.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_crds.yaml"
+    kubectl apply -f "${TMP_MANIFESTS_FOLDER}/crdb_crds.yaml"
+    echo
+
+    echo "Cockroach Operator"
+    echo ">>> Checking if Cockroach Operator is deployed..."
+    if kubectl get --namespace cockroach-operator-system deployment/cockroach-operator-manager &> /dev/null; then
+        echo ">>> Cockroach Operator is present; skipping step."
+    else
+        echo ">>> Deploy Cockroach Operator"
+        sed "s/%TFS_CRDB_NAMESPACE%/${CRDB_NAMESPACE}/g" "${CRDB_MANIFESTS_PATH}/operator.yaml" \
+            > "${TMP_MANIFESTS_FOLDER}/crdb_operator.yaml"
+        kubectl apply -f "${TMP_MANIFESTS_FOLDER}/crdb_operator.yaml"
+        kubectl wait --namespace cockroach-operator-system --for=condition=Available=True --timeout=300s \
+            deployment/cockroach-operator-manager
+        #kubectl wait --namespace cockroach-operator-system --for=jsonpath='{.status.readyReplicas}'=1 --timeout=300s \
+        #    deployment/cockroach-operator-manager
+
+        echo ">>> Waiting for Cockroach Operator Webhock service..."
+        while ! kubectl get service cockroach-operator-webhook-service --namespace cockroach-operator-system &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        WEBHOOK_SERVICE_DATA=$(kubectl get service cockroach-operator-webhook-service --namespace cockroach-operator-system -o json)
+        WEBHOOK_SERVICE_HOST=$(echo ${WEBHOOK_SERVICE_DATA} | jq -r '.spec.clusterIP')
+        WEBHOOK_SERVICE_PORT=$(echo ${WEBHOOK_SERVICE_DATA} | jq -r '.spec.ports[] | select(.targetPort==9443) | .port')
+        WEBHOOK_URL="https://${WEBHOOK_SERVICE_HOST}:${WEBHOOK_SERVICE_PORT}/mutate-crdb-cockroachlabs-com-v1alpha1-crdbcluster?timeout=10s"
+        while ! curl --insecure --header 'Content-Type: application/json' ${WEBHOOK_URL} &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+    fi
+    echo
+
+    echo "CockroachDB Namespace"
+    echo ">>> Create CockroachDB Namespace (if missing)"
+    kubectl create namespace ${CRDB_NAMESPACE}
+    echo
+
+    echo "CockroachDB"
+    echo ">>> Checking if CockroachDB is deployed..."
+    if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
+        echo ">>> CockroachDB is present; skipping step."
+    else
+        echo ">>> Deploy CockroachDB"
+        cp "${CRDB_MANIFESTS_PATH}/cluster.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_cluster.yaml"
+        kubectl apply --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_cluster.yaml"
+
+        echo ">>> Waiting CockroachDB statefulset to be created..."
+        while ! kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+
+        # Wait for statefulset condition "Available=True" does not work
+        # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error:
+        #   "error: readyReplicas is not found"
+        # Workaround: Check the pods are ready
+        #echo ">>> CockroachDB statefulset created. Waiting for readiness condition..."
+        #kubectl wait --namespace  ${CRDB_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/cockroachdb
+        #kubectl wait --namespace ${CRDB_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \
+        #    statefulset/cockroachdb
+        echo ">>> CockroachDB statefulset created. Waiting CockroachDB pods to be created..."
+        while ! kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-0 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        while ! kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-1 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        while ! kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-2 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-0
+        kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-1
+        kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-2
+    fi
+    echo
+
+    echo "CockroachDB Client"
+    echo ">>> Checking if CockroachDB Client is deployed..."
+    if kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-client-secure &> /dev/null; then
+        echo ">>> CockroachDB Client is present; skipping step."
+    else
+        echo ">>> Deploy CockroachDB Client"
+        cp "${CRDB_MANIFESTS_PATH}/client-secure-operator.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_client-secure-operator.yaml"
+        kubectl create --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_client-secure-operator.yaml"
+        kubectl wait --namespace ${CRDB_NAMESPACE} --for=condition=Ready --timeout=300s pod/cockroachdb-client-secure
+    fi
+    echo
+
+    echo "Add tfs user and grant admin rights"
+    kubectl exec -it cockroachdb-client-secure --namespace ${CRDB_NAMESPACE} -- \
+        ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public --execute \
+        "CREATE USER ${CRDB_USERNAME} WITH PASSWORD '${CRDB_PASSWORD}'; GRANT admin TO ${CRDB_USERNAME};"
+    echo
+
+    echo "CockroachDB Port Mapping"
+    echo ">>> Expose CockroachDB SQL port (26257)"
+    CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+    PATCH='{"data": {"'${CRDB_SQL_PORT}'": "'${CRDB_NAMESPACE}'/cockroachdb-public:'${CRDB_SQL_PORT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${CRDB_SQL_PORT}', "hostPort": '${CRDB_SQL_PORT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+
+    echo ">>> Expose CockroachDB HTTP Mgmt GUI port (8080)"
+    CRDB_GUI_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+    PATCH='{"data": {"'${CRDB_GUI_PORT}'": "'${CRDB_NAMESPACE}'/cockroachdb-public:'${CRDB_GUI_PORT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${CRDB_GUI_PORT}', "hostPort": '${CRDB_GUI_PORT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+}
+
+function crdb_undeploy_cluster() {
+    echo "CockroachDB Client"
+    echo ">>> Checking if CockroachDB Client is deployed..."
+    if kubectl get --namespace ${CRDB_NAMESPACE} pod/cockroachdb-client-secure &> /dev/null; then
+        echo ">>> Undeploy CockroachDB Client"
+        kubectl delete --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_client-secure-operator.yaml" \
+            --ignore-not-found
+    else
+        echo ">>> CockroachDB Client is not present; skipping step."
+    fi
+    echo
+
+    echo "CockroachDB"
+    echo ">>> Checking if CockroachDB is deployed..."
+    if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
+        echo ">>> Undeploy CockroachDB"
+        kubectl delete --namespace ${CRDB_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/crdb_cluster.yaml" --ignore-not-found
+    else
+        echo ">>> CockroachDB is not present; skipping step."
+    fi
+    echo
+
+    echo "CockroachDB Namespace"
+    echo ">>> Delete CockroachDB Namespace (if exists)"
+    echo "NOTE: this step might take few minutes to complete!"
+    kubectl delete namespace ${CRDB_NAMESPACE} --ignore-not-found
+    echo
+
+    echo "CockroachDB Operator"
+    echo ">>> Checking if CockroachDB Operator is deployed..."
+    if kubectl get --namespace cockroach-operator-system deployment/cockroach-operator-manager &> /dev/null; then
+        echo ">>> Undeploy CockroachDB Operator"
+        kubectl delete -f "${TMP_MANIFESTS_FOLDER}/crdb_operator.yaml" --ignore-not-found
+    else
+        echo ">>> CockroachDB Operator is not present; skipping step."
+    fi
+    echo
+
+    echo "CockroachDB Operator CRDs"
+    echo ">>> Delete CockroachDB Operator CRDs (if they exist)"
+    kubectl delete -f "${TMP_MANIFESTS_FOLDER}/crdb_crds.yaml" --ignore-not-found
+    echo
+}
+
+function crdb_drop_database_cluster() {
+    echo "Drop database if exists"
+    kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \
+        ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public --execute \
+        "DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
+    echo
+}
+
+if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
+    if [ "$CRDB_REDEPLOY" == "YES" ]; then
+        crdb_undeploy_single
+    elif [ "$CRDB_DROP_DATABASE_IF_EXISTS" == "YES" ]; then
+        crdb_drop_database_single
+    fi
+    crdb_deploy_single
+elif [ "$CRDB_DEPLOY_MODE" == "cluster" ]; then
+    if [ "$CRDB_REDEPLOY" == "YES" ]; then
+        crdb_undeploy_cluster
+    elif [ "$CRDB_DROP_DATABASE_IF_EXISTS" == "YES" ]; then
+        crdb_drop_database_cluster
+    fi
+    crdb_deploy_cluster
+else
+    echo "Unsupported value: CRDB_DEPLOY_MODE=$CRDB_DEPLOY_MODE"
+fi
diff --git a/deploy_mock_blockchain.sh b/deploy/mock_blockchain.sh
similarity index 98%
rename from deploy_mock_blockchain.sh
rename to deploy/mock_blockchain.sh
index 066820fc0f9a1005823dd124798e4de122f206f8..f741f069f4ac87fea478cccc043951100f05e0e0 100755
--- a/deploy_mock_blockchain.sh
+++ b/deploy/mock_blockchain.sh
@@ -34,7 +34,7 @@ COMPONENT="mock_blockchain"
 ########################################################################################################################
 
 # Constants
-GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
+GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
 TMP_FOLDER="./tmp"
 
 # Create a tmp folder for files modified during the deployment
diff --git a/deploy/nats.sh b/deploy/nats.sh
new file mode 100755
index 0000000000000000000000000000000000000000..54402899698995ceaddbebde2254528c4b33a315
--- /dev/null
+++ b/deploy/nats.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the namespace where NATS will be deployed.
+export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
+
+# If not already set, disable the flag for re-deploying NATS from scratch.
+# WARNING: ACTIVATING THIS FLAG IMPLIES LOSING THE MESSAGE BROKER INFORMATION!
+# If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
+export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
+
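+# A minimal sketch of a standalone invocation (for illustration only).
+# WARNING: NATS_REDEPLOY wipes any existing message broker state!
+#   export NATS_REDEPLOY="YES"
+#   ./deploy/nats.sh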
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+TMP_FOLDER="./tmp"
+NATS_MANIFESTS_PATH="manifests/nats"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+mkdir -p $TMP_MANIFESTS_FOLDER
+
+function nats_deploy_single() {
+    echo "NATS Namespace"
+    echo ">>> Create NATS Namespace (if missing)"
+    kubectl create namespace ${NATS_NAMESPACE}
+    echo
+
+    echo "Add NATS Helm Chart"
+    helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
+    echo
+
+    echo "Install NATS (single-node)"
+    echo ">>> Checking if NATS is deployed..."
+    if kubectl get --namespace ${NATS_NAMESPACE} statefulset/nats &> /dev/null; then
+        echo ">>> NATS is present; skipping step."
+    else
+        echo ">>> Deploy NATS"
+        helm3 install nats nats/nats --namespace ${NATS_NAMESPACE} --set nats.image.tag=2.9-alpine
+
+        echo ">>> Waiting NATS statefulset to be created..."
+        while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/nats &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+
+        # Wait for statefulset condition "Available=True" does not work
+        # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error:
+        #   "error: readyReplicas is not found"
+        # Workaround: Check the pods are ready
+        #echo ">>> NATS statefulset created. Waiting for readiness condition..."
+        #kubectl wait --namespace  ${NATS_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/nats
+        #kubectl wait --namespace ${NATS_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \
+        #    statefulset/nats
+        echo ">>> NATS statefulset created. Waiting NATS pods to be created..."
+        while ! kubectl get --namespace ${NATS_NAMESPACE} pod/nats-0 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/nats-0
+    fi
+    echo
+
+    echo "NATS Port Mapping"
+    echo ">>> Expose NATS Client port (4222)"
+    NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service nats -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
+    PATCH='{"data": {"'${NATS_CLIENT_PORT}'": "'${NATS_NAMESPACE}'/nats:'${NATS_CLIENT_PORT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${NATS_CLIENT_PORT}', "hostPort": '${NATS_CLIENT_PORT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+
+    echo ">>> Expose NATS HTTP Mgmt GUI port (8222)"
+    NATS_GUI_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service nats -o 'jsonpath={.spec.ports[?(@.name=="monitor")].port}')
+    PATCH='{"data": {"'${NATS_GUI_PORT}'": "'${NATS_NAMESPACE}'/nats:'${NATS_GUI_PORT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${NATS_GUI_PORT}', "hostPort": '${NATS_GUI_PORT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+}
+
+function nats_undeploy_single() {
+    echo "NATS"
+    echo ">>> Checking if NATS is deployed..."
+    if kubectl get --namespace ${NATS_NAMESPACE} statefulset/nats &> /dev/null; then
+        echo ">>> Undeploy NATS"
+        helm3 uninstall --namespace ${NATS_NAMESPACE} nats
+    else
+        echo ">>> NATS is not present; skipping step."
+    fi
+    echo
+
+    echo "NATS Namespace"
+    echo ">>> Delete NATS Namespace (if exists)"
+    kubectl delete namespace ${NATS_NAMESPACE} --ignore-not-found
+    echo
+}
+
+if [ "$NATS_REDEPLOY" == "YES" ]; then
+    nats_undeploy_single
+fi
+
+nats_deploy_single
diff --git a/show_deploy.sh b/deploy/show.sh
similarity index 100%
rename from show_deploy.sh
rename to deploy/show.sh
diff --git a/deploy.sh b/deploy/tfs.sh
similarity index 66%
rename from deploy.sh
rename to deploy/tfs.sh
index c62778417f7e07a119c778b58fe9c44105d5b1a5..86043ee44829904786e700df813400476ca4e755 100755
--- a/deploy.sh
+++ b/deploy/tfs.sh
@@ -18,10 +18,9 @@
 # Read deployment settings
 ########################################################################################################################
 
-# If not already set, set the URL of your local Docker registry where the images will be uploaded to.
-# Leave it blank if you do not want to use any Docker registry.
-export TFS_REGISTRY_IMAGE=${TFS_REGISTRY_IMAGE:-""}
-#export TFS_REGISTRY_IMAGE="http://my-container-registry.local/"
+# If not already set, set the URL of the Docker registry where the images will be uploaded to.
+# By default, the internal MicroK8s registry is assumed.
+export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 
 # If not already set, set the list of components you want to build images for, and deploy.
 # By default, only basic components are deployed
@@ -43,12 +42,28 @@ export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
 # If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
 export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
 
+# If not already set, set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
+
+# If not already set, set the database username to be used by Context.
+export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"}
+
+# If not already set, set the database user's password to be used by Context.
+export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
+
+# If not already set, set the database name to be used by Context.
+export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
+
+# If not already set, set the namespace where NATS will be deployed.
+export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
+
+
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
 
 # Constants
-GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
+GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
 TMP_FOLDER="./tmp"
 
 # Create a tmp folder for files modified during the deployment
@@ -62,6 +77,26 @@ kubectl delete namespace $TFS_K8S_NAMESPACE
 kubectl create namespace $TFS_K8S_NAMESPACE
 printf "\n"
 
+echo "Create secret with CockroachDB data"
+CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
+    --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
+    --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \
+    --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
+    --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
+    --from-literal=CRDB_SSLMODE=require
+printf "\n"
+
+echo "Create secret with NATS data"
+NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service nats -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
+kubectl create secret generic nats-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \
+    --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT}
+printf "\n"
+
 echo "Deploying components and collecting environment variables..."
 ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
 echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT
@@ -96,50 +131,48 @@ for COMPONENT in $TFS_COMPONENTS; do
             docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
         fi
 
-        if [ -n "$TFS_REGISTRY_IMAGE" ]; then
-            echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGE'..."
+        echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
 
-            if [ "$COMPONENT" == "pathcomp" ]; then
-                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        if [ "$COMPONENT" == "pathcomp" ]; then
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-                TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
-                docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
+            docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
-                PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log"
-                docker push "$IMAGE_URL" > "$PUSH_LOG"
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
 
-                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-                TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log"
-                docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log"
+            docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
-                PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log"
-                docker push "$IMAGE_URL" > "$PUSH_LOG"
-            elif [ "$COMPONENT" == "dlt" ]; then
-                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+        elif [ "$COMPONENT" == "dlt" ]; then
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-                TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log"
-                docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log"
+            docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
-                PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log"
-                docker push "$IMAGE_URL" > "$PUSH_LOG"
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
 
-                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-                TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log"
-                docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log"
+            docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
-                PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log"
-                docker push "$IMAGE_URL" > "$PUSH_LOG"
-            else
-                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+        else
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-                TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
-                docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
+            docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
-                PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
-                docker push "$IMAGE_URL" > "$PUSH_LOG"
-            fi
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
         fi
     fi
 
@@ -147,53 +180,30 @@ for COMPONENT in $TFS_COMPONENTS; do
     MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
     cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
 
-    if [ -n "$TFS_REGISTRY_IMAGE" ]; then
-        # Registry is set
-        if [ "$COMPONENT" == "pathcomp" ]; then
-            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
-            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
-
-            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
-            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
-        elif [ "$COMPONENT" == "dlt" ]; then
-            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
-            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
-
-            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
-            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
-        else
-            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
-            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
-        fi
-
-        sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
+    if [ "$COMPONENT" == "pathcomp" ]; then
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+    elif [ "$COMPONENT" == "dlt" ]; then
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
     else
-        # Registry is not set
-        if [ "$COMPONENT" == "pathcomp" ]; then
-            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $COMPONENT-frontend:$TFS_IMAGE_TAG#g" "$MANIFEST"
-
-            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $COMPONENT-backend:$TFS_IMAGE_TAG#g" "$MANIFEST"
-        elif [ "$COMPONENT" == "dlt" ]; then
-            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $COMPONENT-connector:$TFS_IMAGE_TAG#g" "$MANIFEST"
-
-            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $COMPONENT-gateway:$TFS_IMAGE_TAG#g" "$MANIFEST"
-        else
-            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $COMPONENT:$TFS_IMAGE_TAG#g" "$MANIFEST"
-        fi
-
-        sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
     fi
 
+    sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
+
     # TODO: harmonize names of the monitoring component
 
     echo "  Deploying '$COMPONENT' component to Kubernetes..."
@@ -345,6 +355,3 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"
     printf "\n\n"
 fi
 
-./show_deploy.sh
-
-echo "Done!"
diff --git a/hackfest/p4/tests/Objects.py b/hackfest/p4/tests/Objects.py
index c8b172244d714cd699ccc587e54c3751485a9a2e..dcef0255249cb0d7ed57ad18e2953a2e2796c303 100644
--- a/hackfest/p4/tests/Objects.py
+++ b/hackfest/p4/tests/Objects.py
@@ -14,7 +14,7 @@
 
 import os
 from typing import Dict, List, Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled,
@@ -28,12 +28,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Topology -------------------------------------------------------------------------------------------------------
-TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
+TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
 
 # ----- Monitoring Samples ---------------------------------------------------------------------------------------------
 PACKET_PORT_SAMPLE_TYPES = [
diff --git a/manifests/.gitlab-ci.yml b/manifests/.gitlab-ci.yml
index d20b67e531c33bfeae9c796ed95488b4c81d0fe4..9ce323c58aabe079367bd84011101ce52168d725 100644
--- a/manifests/.gitlab-ci.yml
+++ b/manifests/.gitlab-ci.yml
@@ -14,10 +14,10 @@
 
 # Deployment of the dependency services in Kubernetes Cluster
 
-dependencies all:
-  stage: dependencies
-  script:
-    - kubectl version
-    - kubectl get all
-    - kubectl apply -f "manifests/prometheus.yaml"
-    - kubectl get all
+#dependencies all:
+#  stage: dependencies
+#  script:
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/prometheus.yaml"
+#    - kubectl get all
diff --git a/manifests/cockroachdb/README.md b/manifests/cockroachdb/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bfd774f0f85ca567986ae452c30b4305b336c57e
--- /dev/null
+++ b/manifests/cockroachdb/README.md
@@ -0,0 +1,62 @@
+# CockroachDB configuration preparation
+
+These steps describe how the CockroachDB manifest files used in TeraFlowSDN are generated and how to apply them to MicroK8s.
+For stability reasons, we pin the versions and ship the resulting manifest files with the repository.
+In future releases of TeraFlowSDN, we might consider downloading and modifying the files dynamically.
+
+- Ref: https://www.cockroachlabs.com/docs/stable/configure-cockroachdb-kubernetes.html
+
+## Steps
+
+DEPLOY_PATH="manifests/cockroachdb"
+OPERATOR_BASE_URL="https://raw.githubusercontent.com/cockroachdb/cockroach-operator/master"
+
+mkdir -p ${DEPLOY_PATH}
+
+# Apply Custom Resource Definition for the CockroachDB Operator
+curl -o "${DEPLOY_PATH}/crds.yaml" "${OPERATOR_BASE_URL}/install/crds.yaml"
+kubectl apply -f "${DEPLOY_PATH}/crds.yaml"
+
+# Deploy CockroachDB Operator
+curl -o "${DEPLOY_PATH}/operator.yaml" "${OPERATOR_BASE_URL}/install/operator.yaml"
+nano "${DEPLOY_PATH}/operator.yaml"
+# - add env var: WATCH_NAMESPACE=%TFS_CRDB_NAMESPACE%
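+#   For reference, a sketch of how the resulting env entry looks inside the
+#   operator container spec (the exact placement follows the upstream manifest):
+#     env:
+#     - name: WATCH_NAMESPACE
+#       value: %TFS_CRDB_NAMESPACE%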
+sed s/%TFS_CRDB_NAMESPACE%/crdb/g ${DEPLOY_PATH}/operator.yaml > ${DEPLOY_PATH}/tfs_crdb_operator.yaml
+kubectl apply -f "${DEPLOY_PATH}/tfs_crdb_operator.yaml"
+
+# Deploy CockroachDB
+curl -o "${DEPLOY_PATH}/cluster.yaml" "${OPERATOR_BASE_URL}/examples/example.yaml"
+nano "${DEPLOY_PATH}/cluster.yaml"
+# - set version
+# - set number of replicas
+kubectl create namespace crdb
+kubectl apply --namespace crdb -f "${DEPLOY_PATH}/cluster.yaml"
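+
+# Optionally, watch the CockroachDB pods until all replicas are Running/Ready
+# (initialization may take a few minutes):
+kubectl get pods --namespace crdb --watch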
+
+# Deploy CockroachDB Client
+curl -o "${DEPLOY_PATH}/client-secure-operator.yaml" "${OPERATOR_BASE_URL}/examples/client-secure-operator.yaml"
+kubectl create --namespace crdb -f "${DEPLOY_PATH}/client-secure-operator.yaml"
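+
+# Optionally, wait for the client pod to be ready before opening the SQL shell:
+kubectl wait --namespace crdb --for=condition=Ready pod/cockroachdb-client-secure --timeout=300s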
+
+# Add the tfs user with admin rights; run the indented SQL statements in the interactive shell that opens
+kubectl exec -it cockroachdb-client-secure --namespace crdb -- ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public
+    CREATE USER tfs WITH PASSWORD 'tfs123';
+    GRANT admin TO tfs;
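+    -- optional sanity check, still inside the SQL shell:
+    SHOW USERS;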
+
+# Expose CockroachDB SQL port (26257)
+PORT=$(kubectl --namespace crdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+PATCH='{"data": {"'${PORT}'": "crdb/cockroachdb-public:'${PORT}'"}}'
+kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+PORT_MAP='{"containerPort": '${PORT}', "hostPort": '${PORT}'}'
+CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
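+
+# Optional check: the new TCP forwarding entry should appear in the ingress config map
+kubectl get configmap nginx-ingress-tcp-microk8s-conf --namespace ingress -o yaml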
+
+# Expose CockroachDB Console port (8080)
+PORT=$(kubectl --namespace crdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+PATCH='{"data": {"'${PORT}'": "crdb/cockroachdb-public:'${PORT}'"}}'
+kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+PORT_MAP='{"containerPort": '${PORT}', "hostPort": '${PORT}'}'
+CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
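+
+# Optional check (assuming you run these steps on the MicroK8s host): the CockroachDB
+# console should now answer on the exposed port; -k skips the self-signed certificate check
+curl -k https://localhost:8080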
diff --git a/manifests/cockroachdb/client-secure-operator.yaml b/manifests/cockroachdb/client-secure-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f7f81c8339d4ba47722a0ef2a2236178f1b9e1b0
--- /dev/null
+++ b/manifests/cockroachdb/client-secure-operator.yaml
@@ -0,0 +1,51 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated, do not edit. Please edit this file instead: config/templates/client-secure-operator.yaml.in
+#
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cockroachdb-client-secure
+spec:
+  serviceAccountName: cockroachdb-sa
+  containers:
+  - name: cockroachdb-client-secure
+    image: cockroachdb/cockroach:v22.2.0
+    imagePullPolicy: IfNotPresent
+    volumeMounts:
+    - name: client-certs
+      mountPath: /cockroach/cockroach-certs/
+    command:
+    - sleep
+    - "2147483648" # 2^31
+  terminationGracePeriodSeconds: 0
+  volumes:
+  - name: client-certs
+    projected:
+        sources:
+          - secret:
+              name: cockroachdb-node
+              items:
+                - key: ca.crt
+                  path: ca.crt
+          - secret:
+              name: cockroachdb-root
+              items:
+                - key: tls.crt
+                  path: client.root.crt
+                - key: tls.key
+                  path: client.root.key
+        defaultMode: 256
diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f7444c0067cc9c2c07b53c85d765bb81d1c20c05
--- /dev/null
+++ b/manifests/cockroachdb/cluster.yaml
@@ -0,0 +1,70 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated, do not edit. Please edit this file instead: config/templates/example.yaml.in
+#
+
+apiVersion: crdb.cockroachlabs.com/v1alpha1
+kind: CrdbCluster
+metadata:
+  # this translates to the name of the statefulset that is created
+  name: cockroachdb
+spec:
+  dataStore:
+    pvc:
+      spec:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: "60Gi"
+        volumeMode: Filesystem
+  resources:
+    requests:
+      # This is intentionally low to make it work on local k3d clusters.
+      cpu: 100m
+      memory: 1Gi
+    limits:
+      cpu: 1
+      memory: 4Gi
+  tlsEnabled: true
+# You can set either a version of the db or a specific image name
+# cockroachDBVersion: v22.2.0
+  image:
+    name: cockroachdb/cockroach:v22.2.0
+  # nodes refers to the number of crdb pods that are created
+  # via the statefulset
+  nodes: 3
+  additionalLabels:
+    crdb: is-cool
+  # affinity is a new API field that is behind a feature gate that is
+  # disabled by default. To enable it, please see the operator.yaml file.
+
+  # The affinity field will accept any podSpec affinity rule.
+  # affinity:
+  #   podAntiAffinity:
+  #      preferredDuringSchedulingIgnoredDuringExecution:
+  #      - weight: 100
+  #        podAffinityTerm:
+  #          labelSelector:
+  #            matchExpressions:
+  #            - key: app.kubernetes.io/instance
+  #              operator: In
+  #              values:
+  #              - cockroachdb
+  #          topologyKey: kubernetes.io/hostname
+
+  # nodeSelectors used to match against
+  # nodeSelector:
+  #   worker-pool-name: crdb-workers
diff --git a/manifests/cockroachdb/crds.yaml b/manifests/cockroachdb/crds.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1b5cd89ae7001b3e200c0de7da240b660c461f3b
--- /dev/null
+++ b/manifests/cockroachdb/crds.yaml
@@ -0,0 +1,1385 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: (unknown)
+  creationTimestamp: null
+  name: crdbclusters.crdb.cockroachlabs.com
+spec:
+  group: crdb.cockroachlabs.com
+  names:
+    categories:
+    - all
+    - cockroachdb
+    kind: CrdbCluster
+    listKind: CrdbClusterList
+    plural: crdbclusters
+    shortNames:
+    - crdb
+    singular: crdbcluster
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: CrdbCluster is the CRD for the cockroachDB clusters API
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: CrdbClusterSpec defines the desired state of a CockroachDB
+              Cluster that the operator maintains.
+            properties:
+              additionalAnnotations:
+                additionalProperties:
+                  type: string
+                description: (Optional) Additional custom resource annotations that
+                  are added to all resources. Changing `AdditionalAnnotations` field
+                  will result in cockroachDB cluster restart.
+                type: object
+              additionalArgs:
+                description: '(Optional) Additional command line arguments for the
+                  `cockroach` binary Default: ""'
+                items:
+                  type: string
+                type: array
+              additionalLabels:
+                additionalProperties:
+                  type: string
+                description: (Optional) Additional custom resource labels that are
+                  added to all resources
+                type: object
+              affinity:
+                description: (Optional) If specified, the pod's scheduling constraints
+                properties:
+                  nodeAffinity:
+                    description: Describes node affinity scheduling rules for the
+                      pod.
+                    properties:
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        description: The scheduler will prefer to schedule pods to
+                          nodes that satisfy the affinity expressions specified by
+                          this field, but it may choose a node that violates one or
+                          more of the expressions. The node that is most preferred
+                          is the one with the greatest sum of weights, i.e. for each
+                          node that meets all of the scheduling requirements (resource
+                          request, requiredDuringScheduling affinity expressions,
+                          etc.), compute a sum by iterating through the elements of
+                          this field and adding "weight" to the sum if the node matches
+                          the corresponding matchExpressions; the node(s) with the
+                          highest sum are the most preferred.
+                        items:
+                          description: An empty preferred scheduling term matches
+                            all objects with implicit weight 0 (i.e. it's a no-op).
+                            A null preferred scheduling term matches no objects (i.e.
+                            is also a no-op).
+                          properties:
+                            preference:
+                              description: A node selector term, associated with the
+                                corresponding weight.
+                              properties:
+                                matchExpressions:
+                                  description: A list of node selector requirements
+                                    by node's labels.
+                                  items:
+                                    description: A node selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: Represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists, DoesNotExist, Gt, and
+                                          Lt.
+                                        type: string
+                                      values:
+                                        description: An array of string values. If
+                                          the operator is In or NotIn, the values
+                                          array must be non-empty. If the operator
+                                          is Exists or DoesNotExist, the values array
+                                          must be empty. If the operator is Gt or
+                                          Lt, the values array must have a single
+                                          element, which will be interpreted as an
+                                          integer. This array is replaced during a
+                                          strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchFields:
+                                  description: A list of node selector requirements
+                                    by node's fields.
+                                  items:
+                                    description: A node selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: Represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists, DoesNotExist, Gt, and
+                                          Lt.
+                                        type: string
+                                      values:
+                                        description: An array of string values. If
+                                          the operator is In or NotIn, the values
+                                          array must be non-empty. If the operator
+                                          is Exists or DoesNotExist, the values array
+                                          must be empty. If the operator is Gt or
+                                          Lt, the values array must have a single
+                                          element, which will be interpreted as an
+                                          integer. This array is replaced during a
+                                          strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                              type: object
+                            weight:
+                              description: Weight associated with matching the corresponding
+                                nodeSelectorTerm, in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - preference
+                          - weight
+                          type: object
+                        type: array
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: If the affinity requirements specified by this
+                          field are not met at scheduling time, the pod will not be
+                          scheduled onto the node. If the affinity requirements specified
+                          by this field cease to be met at some point during pod execution
+                          (e.g. due to an update), the system may or may not try to
+                          eventually evict the pod from its node.
+                        properties:
+                          nodeSelectorTerms:
+                            description: Required. A list of node selector terms.
+                              The terms are ORed.
+                            items:
+                              description: A null or empty node selector term matches
+                                no objects. The requirements of them are ANDed. The
+                                TopologySelectorTerm type implements a subset of the
+                                NodeSelectorTerm.
+                              properties:
+                                matchExpressions:
+                                  description: A list of node selector requirements
+                                    by node's labels.
+                                  items:
+                                    description: A node selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: Represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists, DoesNotExist, Gt, and
+                                          Lt.
+                                        type: string
+                                      values:
+                                        description: An array of string values. If
+                                          the operator is In or NotIn, the values
+                                          array must be non-empty. If the operator
+                                          is Exists or DoesNotExist, the values array
+                                          must be empty. If the operator is Gt or
+                                          Lt, the values array must have a single
+                                          element, which will be interpreted as an
+                                          integer. This array is replaced during a
+                                          strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchFields:
+                                  description: A list of node selector requirements
+                                    by node's fields.
+                                  items:
+                                    description: A node selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: Represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists, DoesNotExist, Gt, and
+                                          Lt.
+                                        type: string
+                                      values:
+                                        description: An array of string values. If
+                                          the operator is In or NotIn, the values
+                                          array must be non-empty. If the operator
+                                          is Exists or DoesNotExist, the values array
+                                          must be empty. If the operator is Gt or
+                                          Lt, the values array must have a single
+                                          element, which will be interpreted as an
+                                          integer. This array is replaced during a
+                                          strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                              type: object
+                            type: array
+                        required:
+                        - nodeSelectorTerms
+                        type: object
+                    type: object
+                  podAffinity:
+                    description: Describes pod affinity scheduling rules (e.g. co-locate
+                      this pod in the same node, zone, etc. as some other pod(s)).
+                    properties:
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        description: The scheduler will prefer to schedule pods to
+                          nodes that satisfy the affinity expressions specified by
+                          this field, but it may choose a node that violates one or
+                          more of the expressions. The node that is most preferred
+                          is the one with the greatest sum of weights, i.e. for each
+                          node that meets all of the scheduling requirements (resource
+                          request, requiredDuringScheduling affinity expressions,
+                          etc.), compute a sum by iterating through the elements of
+                          this field and adding "weight" to the sum if the node has
+                          pods which match the corresponding podAffinityTerm; the
+                          node(s) with the highest sum are the most preferred.
+                        items:
+                          description: The weights of all of the matched WeightedPodAffinityTerm
+                            fields are added per-node to find the most preferred node(s)
+                          properties:
+                            podAffinityTerm:
+                              description: Required. A pod affinity term, associated
+                                with the corresponding weight.
+                              properties:
+                                labelSelector:
+                                  description: A label query over a set of resources,
+                                    in this case pods.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list of label
+                                        selector requirements. The requirements are
+                                        ANDed.
+                                      items:
+                                        description: A label selector requirement
+                                          is a selector that contains values, a key,
+                                          and an operator that relates the key and
+                                          values.
+                                        properties:
+                                          key:
+                                            description: key is the label key that
+                                              the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: operator represents a key's
+                                              relationship to a set of values. Valid
+                                              operators are In, NotIn, Exists and
+                                              DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: values is an array of string
+                                              values. If the operator is In or NotIn,
+                                              the values array must be non-empty.
+                                              If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This
+                                              array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: matchLabels is a map of {key,value}
+                                        pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions,
+                                        whose key field is "key", the operator is
+                                        "In", and the values array contains only "value".
+                                        The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                namespaces:
+                                  description: namespaces specifies which namespaces
+                                    the labelSelector applies to (matches against);
+                                    null or empty list means "this pod's namespace"
+                                  items:
+                                    type: string
+                                  type: array
+                                topologyKey:
+                                  description: This pod should be co-located (affinity)
+                                    or not co-located (anti-affinity) with the pods
+                                    matching the labelSelector in the specified namespaces,
+                                    where co-located is defined as running on a node
+                                    whose value of the label with key topologyKey
+                                    matches that of any node on which any of the selected
+                                    pods is running. Empty topologyKey is not allowed.
+                                  type: string
+                              required:
+                              - topologyKey
+                              type: object
+                            weight:
+                              description: weight associated with matching the corresponding
+                                podAffinityTerm, in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - podAffinityTerm
+                          - weight
+                          type: object
+                        type: array
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: If the affinity requirements specified by this
+                          field are not met at scheduling time, the pod will not be
+                          scheduled onto the node. If the affinity requirements specified
+                          by this field cease to be met at some point during pod execution
+                          (e.g. due to a pod label update), the system may or may
+                          not try to eventually evict the pod from its node. When
+                          there are multiple elements, the lists of nodes corresponding
+                          to each podAffinityTerm are intersected, i.e. all terms
+                          must be satisfied.
+                        items:
+                          description: Defines a set of pods (namely those matching
+                            the labelSelector relative to the given namespace(s))
+                            that this pod should be co-located (affinity) or not co-located
+                            (anti-affinity) with, where co-located is defined as running
+                            on a node whose value of the label with key <topologyKey>
+                            matches that of any node on which a pod of the set of
+                            pods is running
+                          properties:
+                            labelSelector:
+                              description: A label query over a set of resources,
+                                in this case pods.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: A label selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: operator represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: values is an array of string
+                                          values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the
+                                          operator is Exists or DoesNotExist, the
+                                          values array must be empty. This array is
+                                          replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: matchLabels is a map of {key,value}
+                                    pairs. A single {key,value} in the matchLabels
+                                    map is equivalent to an element of matchExpressions,
+                                    whose key field is "key", the operator is "In",
+                                    and the values array contains only "value". The
+                                    requirements are ANDed.
+                                  type: object
+                              type: object
+                            namespaces:
+                              description: namespaces specifies which namespaces the
+                                labelSelector applies to (matches against); null or
+                                empty list means "this pod's namespace"
+                              items:
+                                type: string
+                              type: array
+                            topologyKey:
+                              description: This pod should be co-located (affinity)
+                                or not co-located (anti-affinity) with the pods matching
+                                the labelSelector in the specified namespaces, where
+                                co-located is defined as running on a node whose value
+                                of the label with key topologyKey matches that of
+                                any node on which any of the selected pods is running.
+                                Empty topologyKey is not allowed.
+                              type: string
+                          required:
+                          - topologyKey
+                          type: object
+                        type: array
+                    type: object
+                  podAntiAffinity:
+                    description: Describes pod anti-affinity scheduling rules (e.g.
+                      avoid putting this pod in the same node, zone, etc. as some
+                      other pod(s)).
+                    properties:
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        description: The scheduler will prefer to schedule pods to
+                          nodes that satisfy the anti-affinity expressions specified
+                          by this field, but it may choose a node that violates one
+                          or more of the expressions. The node that is most preferred
+                          is the one with the greatest sum of weights, i.e. for each
+                          node that meets all of the scheduling requirements (resource
+                          request, requiredDuringScheduling anti-affinity expressions,
+                          etc.), compute a sum by iterating through the elements of
+                          this field and adding "weight" to the sum if the node has
+                          pods which match the corresponding podAffinityTerm; the
+                          node(s) with the highest sum are the most preferred.
+                        items:
+                          description: The weights of all of the matched WeightedPodAffinityTerm
+                            fields are added per-node to find the most preferred node(s)
+                          properties:
+                            podAffinityTerm:
+                              description: Required. A pod affinity term, associated
+                                with the corresponding weight.
+                              properties:
+                                labelSelector:
+                                  description: A label query over a set of resources,
+                                    in this case pods.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list of label
+                                        selector requirements. The requirements are
+                                        ANDed.
+                                      items:
+                                        description: A label selector requirement
+                                          is a selector that contains values, a key,
+                                          and an operator that relates the key and
+                                          values.
+                                        properties:
+                                          key:
+                                            description: key is the label key that
+                                              the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: operator represents a key's
+                                              relationship to a set of values. Valid
+                                              operators are In, NotIn, Exists and
+                                              DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: values is an array of string
+                                              values. If the operator is In or NotIn,
+                                              the values array must be non-empty.
+                                              If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This
+                                              array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: matchLabels is a map of {key,value}
+                                        pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions,
+                                        whose key field is "key", the operator is
+                                        "In", and the values array contains only "value".
+                                        The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                namespaces:
+                                  description: namespaces specifies which namespaces
+                                    the labelSelector applies to (matches against);
+                                    null or empty list means "this pod's namespace"
+                                  items:
+                                    type: string
+                                  type: array
+                                topologyKey:
+                                  description: This pod should be co-located (affinity)
+                                    or not co-located (anti-affinity) with the pods
+                                    matching the labelSelector in the specified namespaces,
+                                    where co-located is defined as running on a node
+                                    whose value of the label with key topologyKey
+                                    matches that of any node on which any of the selected
+                                    pods is running. Empty topologyKey is not allowed.
+                                  type: string
+                              required:
+                              - topologyKey
+                              type: object
+                            weight:
+                              description: weight associated with matching the corresponding
+                                podAffinityTerm, in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - podAffinityTerm
+                          - weight
+                          type: object
+                        type: array
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: If the anti-affinity requirements specified by
+                          this field are not met at scheduling time, the pod will
+                          not be scheduled onto the node. If the anti-affinity requirements
+                          specified by this field cease to be met at some point during
+                          pod execution (e.g. due to a pod label update), the system
+                          may or may not try to eventually evict the pod from its
+                          node. When there are multiple elements, the lists of nodes
+                          corresponding to each podAffinityTerm are intersected, i.e.
+                          all terms must be satisfied.
+                        items:
+                          description: Defines a set of pods (namely those matching
+                            the labelSelector relative to the given namespace(s))
+                            that this pod should be co-located (affinity) or not co-located
+                            (anti-affinity) with, where co-located is defined as running
+                            on a node whose value of the label with key <topologyKey>
+                            matches that of any node on which a pod of the set of
+                            pods is running
+                          properties:
+                            labelSelector:
+                              description: A label query over a set of resources,
+                                in this case pods.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: A label selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: operator represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: values is an array of string
+                                          values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the
+                                          operator is Exists or DoesNotExist, the
+                                          values array must be empty. This array is
+                                          replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: matchLabels is a map of {key,value}
+                                    pairs. A single {key,value} in the matchLabels
+                                    map is equivalent to an element of matchExpressions,
+                                    whose key field is "key", the operator is "In",
+                                    and the values array contains only "value". The
+                                    requirements are ANDed.
+                                  type: object
+                              type: object
+                            namespaces:
+                              description: namespaces specifies which namespaces the
+                                labelSelector applies to (matches against); null or
+                                empty list means "this pod's namespace"
+                              items:
+                                type: string
+                              type: array
+                            topologyKey:
+                              description: This pod should be co-located (affinity)
+                                or not co-located (anti-affinity) with the pods matching
+                                the labelSelector in the specified namespaces, where
+                                co-located is defined as running on a node whose value
+                                of the label with key topologyKey matches that of
+                                any node on which any of the selected pods is running.
+                                Empty topologyKey is not allowed.
+                              type: string
+                          required:
+                          - topologyKey
+                          type: object
+                        type: array
+                    type: object
+                type: object
+              automountServiceAccountToken:
+                description: '(Optional) AutomountServiceAccountToken determines whether
+                  or not the stateful set pods should automount the service account
+                  token. This is the default behavior in Kubernetes. For backward
+                  compatibility reasons, this value defaults to `false` here. Default:
+                  false'
+                type: boolean
+              cache:
+                description: '(Optional) The total size for caches (`--cache` command
+                  line parameter) Default: "25%"'
+                type: string
+              clientTLSSecret:
+                description: '(Optional) The secret with a certificate and a private
+                  key for root database user Default: ""'
+                type: string
+              cockroachDBVersion:
+                description: '(Optional) CockroachDBVersion sets the explicit version
+                  of the cockroachDB image Default: ""'
+                type: string
+              dataStore:
+                description: Database disk storage configuration
+                properties:
+                  hostPath:
+                    description: (Optional) Directory from the host node's filesystem
+                    properties:
+                      path:
+                        description: 'Path of the directory on the host. If the path
+                          is a symlink, it will follow the link to the real path.
+                          More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
+                        type: string
+                      type:
+                        description: 'Type for HostPath Volume Defaults to "" More
+                          info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
+                        type: string
+                    required:
+                    - path
+                    type: object
+                  pvc:
+                    description: (Optional) Persistent volume to use
+                    properties:
+                      source:
+                        description: (Optional) Existing PVC in the same namespace
+                        properties:
+                          claimName:
+                            description: 'ClaimName is the name of a PersistentVolumeClaim
+                              in the same namespace as the pod using this volume.
+                              More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+                            type: string
+                          readOnly:
+                            description: Will force the ReadOnly setting in VolumeMounts.
+                              Default false.
+                            type: boolean
+                        required:
+                        - claimName
+                        type: object
+                      spec:
+                        description: (Optional) PVC to request a new persistent volume
+                        properties:
+                          accessModes:
+                            description: 'AccessModes contains the desired access
+                              modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+                            items:
+                              type: string
+                            type: array
+                          dataSource:
+                            description: 'This field can be used to specify either:
+                              * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                              * An existing PVC (PersistentVolumeClaim) * An existing
+                              custom resource that implements data population (Alpha)
+                              In order to use custom resource types that implement
+                              data population, the AnyVolumeDataSource feature gate
+                              must be enabled. If the provisioner or an external controller
+                              can support the specified data source, it will create
+                              a new volume based on the contents of the specified
+                              data source.'
+                            properties:
+                              apiGroup:
+                                description: APIGroup is the group for the resource
+                                  being referenced. If APIGroup is not specified,
+                                  the specified Kind must be in the core API group.
+                                  For any other third-party types, APIGroup is required.
+                                type: string
+                              kind:
+                                description: Kind is the type of resource being referenced
+                                type: string
+                              name:
+                                description: Name is the name of resource being referenced
+                                type: string
+                            required:
+                            - kind
+                            - name
+                            type: object
+                          resources:
+                            description: 'Resources represents the minimum resources
+                              the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+                            properties:
+                              limits:
+                                additionalProperties:
+                                  anyOf:
+                                  - type: integer
+                                  - type: string
+                                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                  x-kubernetes-int-or-string: true
+                                description: 'Limits describes the maximum amount
+                                  of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                                type: object
+                              requests:
+                                additionalProperties:
+                                  anyOf:
+                                  - type: integer
+                                  - type: string
+                                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                  x-kubernetes-int-or-string: true
+                                description: 'Requests describes the minimum amount
+                                  of compute resources required. If Requests is omitted
+                                  for a container, it defaults to Limits if that is
+                                  explicitly specified, otherwise to an implementation-defined
+                                  value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                                type: object
+                            type: object
+                          selector:
+                            description: A label query over volumes to consider for
+                              binding.
+                            properties:
+                              matchExpressions:
+                                description: matchExpressions is a list of label selector
+                                  requirements. The requirements are ANDed.
+                                items:
+                                  description: A label selector requirement is a selector
+                                    that contains values, a key, and an operator that
+                                    relates the key and values.
+                                  properties:
+                                    key:
+                                      description: key is the label key that the selector
+                                        applies to.
+                                      type: string
+                                    operator:
+                                      description: operator represents a key's relationship
+                                        to a set of values. Valid operators are In,
+                                        NotIn, Exists and DoesNotExist.
+                                      type: string
+                                    values:
+                                      description: values is an array of string values.
+                                        If the operator is In or NotIn, the values
+                                        array must be non-empty. If the operator is
+                                        Exists or DoesNotExist, the values array must
+                                        be empty. This array is replaced during a
+                                        strategic merge patch.
+                                      items:
+                                        type: string
+                                      type: array
+                                  required:
+                                  - key
+                                  - operator
+                                  type: object
+                                type: array
+                              matchLabels:
+                                additionalProperties:
+                                  type: string
+                                description: matchLabels is a map of {key,value} pairs.
+                                  A single {key,value} in the matchLabels map is equivalent
+                                  to an element of matchExpressions, whose key field
+                                  is "key", the operator is "In", and the values array
+                                  contains only "value". The requirements are ANDed.
+                                type: object
+                            type: object
+                          storageClassName:
+                            description: 'Name of the StorageClass required by the
+                              claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+                            type: string
+                          volumeMode:
+                            description: volumeMode defines what type of volume is
+                              required by the claim. Value of Filesystem is implied
+                              when not included in claim spec.
+                            type: string
+                          volumeName:
+                            description: VolumeName is the binding reference to the
+                              PersistentVolume backing this claim.
+                            type: string
+                        type: object
+                    type: object
+                  supportsAutoResize:
+                    description: '(Optional) SupportsAutoResize marks that a PVC will
+                      resize without restarting the entire cluster. Default: false'
+                    type: boolean
+                type: object
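+              # Illustrative example (storage size and flags are assumptions, not
+              # defaults): a dataStore stanza with in-place PVC resize enabled:
+              #   dataStore:
+              #     supportsAutoResize: true
+              #     pvc:
+              #       spec:
+              #         accessModes: ["ReadWriteOnce"]
+              #         resources:
+              #           requests:
+              #             storage: 60Gi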
+              grpcPort:
+                description: '(Optional) The database port (`--port` CLI parameter
+                  when starting the service). Default: 26258'
+                format: int32
+                type: integer
+              httpPort:
+                description: '(Optional) The web UI port (`--http-port` CLI parameter
+                  when starting the service). Default: 8080'
+                format: int32
+                type: integer
+              image:
+                description: (Optional) Container image information
+                properties:
+                  name:
+                    description: 'Container image with supported CockroachDB version.
+                      This defaults to the version pinned to the operator and requires
+                      a full container and tag/sha name. For instance: cockroachdb/cockroach:v20.1'
+                    type: string
+                  pullPolicy:
+                    description: '(Optional) PullPolicy for the image, which defaults
+                      to IfNotPresent. Default: IfNotPresent'
+                    type: string
+                  pullSecret:
+                    description: (Optional) Secret name containing the dockerconfig
+                      to use for a registry that requires authentication. The secret
+                      must be configured first by the user.
+                    type: string
+                required:
+                - name
+                type: object
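+              # Illustrative example: pinning one of the versions the operator ships
+              # (see the RELATED_IMAGE_* list in manifests/cockroachdb/operator.yaml):
+              #   image:
+              #     name: cockroachdb/cockroach:v22.2.0
+              #     pullPolicy: IfNotPresent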
+              ingress:
+                description: (Optional) Ingress defines the Ingress configuration
+                  used to expose the services using Ingress
+                properties:
+                  sql:
+                    description: (Optional) Ingress options for SQL connections. Adding/changing
+                      the SQL host will result in a rolling update of the crdb cluster
+                      nodes.
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: (Optional) Annotations related to ingress resource
+                        type: object
+                      host:
+                        description: host is the host to be used for exposing the service
+                        type: string
+                      ingressClassName:
+                        description: (Optional) IngressClassName to be used by ingress
+                          resource
+                        type: string
+                      tls:
+                        description: (Optional) TLS describes the TLS certificate
+                          info
+                        items:
+                          description: IngressTLS describes the transport layer security
+                            associated with an Ingress.
+                          properties:
+                            hosts:
+                              description: Hosts are a list of hosts included in the
+                                TLS certificate. The values in this list must match
+                                the name/s used in the tlsSecret. Defaults to the
+                                wildcard host setting for the loadbalancer controller
+                                fulfilling this Ingress, if left unspecified.
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            secretName:
+                              description: SecretName is the name of the secret used
+                                to terminate TLS traffic on port 443. Field is left
+                                optional to allow TLS routing based on SNI hostname
+                                alone. If the SNI host in a listener conflicts with
+                                the "Host" header field used by an IngressRule, the
+                                SNI host is used for termination and value of the
+                                Host header is used for routing.
+                              type: string
+                          type: object
+                        type: array
+                    required:
+                    - host
+                    type: object
+                  ui:
+                    description: (Optional) Ingress options for UI (HTTP) connections
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: (Optional) Annotations related to ingress resource
+                        type: object
+                      host:
+                        description: host is the host to be used for exposing the service
+                        type: string
+                      ingressClassName:
+                        description: (Optional) IngressClassName to be used by ingress
+                          resource
+                        type: string
+                      tls:
+                        description: (Optional) TLS describes the TLS certificate
+                          info
+                        items:
+                          description: IngressTLS describes the transport layer security
+                            associated with an Ingress.
+                          properties:
+                            hosts:
+                              description: Hosts are a list of hosts included in the
+                                TLS certificate. The values in this list must match
+                                the name/s used in the tlsSecret. Defaults to the
+                                wildcard host setting for the loadbalancer controller
+                                fulfilling this Ingress, if left unspecified.
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            secretName:
+                              description: SecretName is the name of the secret used
+                                to terminate TLS traffic on port 443. Field is left
+                                optional to allow TLS routing based on SNI hostname
+                                alone. If the SNI host in a listener conflicts with
+                                the "Host" header field used by an IngressRule, the
+                                SNI host is used for termination and value of the
+                                Host header is used for routing.
+                              type: string
+                          type: object
+                        type: array
+                    required:
+                    - host
+                    type: object
+                type: object
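+              # Illustrative example (hosts and class name are placeholders):
+              #   ingress:
+              #     ui:
+              #       ingressClassName: nginx
+              #       host: crdb-ui.example.com
+              #     sql:
+              #       host: crdb-sql.example.com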
+              logConfigMap:
+                description: '(Optional) LogConfigMap defines the ConfigMap that
+                  contains the log configuration used to send logs through the proper
+                  channels in CockroachDB. Logging configuration is available for
+                  cockroach version v21.1.0 onwards. The logging configuration is
+                  provided as a YAML file; see the default logging configuration
+                  (https://www.cockroachlabs.com/docs/stable/configure-logs.html#default-logging-configuration).
+                  The default logging for cockroach version v20.x or less is stderr;
+                  the logging API is ignored for older versions. NOTE: The `data`
+                  field of the map must contain an entry called `logging.yaml` with
+                  the config options.'
+                type: string
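+              # Illustrative example of a ConfigMap consumed via logConfigMap (the
+              # map name is an assumption; `data` must carry a `logging.yaml` entry):
+              #   apiVersion: v1
+              #   kind: ConfigMap
+              #   metadata:
+              #     name: crdb-log-config
+              #   data:
+              #     logging.yaml: |
+              #       sinks:
+              #         stderr:
+              #           channels: [DEV, OPS]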
+              maxSQLMemory:
+                description: '(Optional) The maximum in-memory storage capacity available
+                  to store temporary data for SQL queries (`--max-sql-memory` parameter).
+                  Default: "25%"'
+                type: string
+              maxUnavailable:
+                description: (Optional) The maximum number of pods that can be unavailable
+                  during a rolling update. This number is set in the PodDisruptionBudget
+                  and defaults to 1.
+                format: int32
+                type: integer
+              minAvailable:
+                description: (Optional) The minimum number of pods that must remain
+                  available during a rolling update. This number is set in the PodDisruptionBudget
+                  and defaults to 1.
+                format: int32
+                type: integer
+              nodeSelector:
+                additionalProperties:
+                  type: string
+                description: (Optional) If specified, the pod's nodeSelector
+                type: object
+              nodeTLSSecret:
+                description: '(Optional) The secret with certificates and a private
+                  key for the TLS endpoint on the database port. The standard naming
+                  of files is expected (tls.key, tls.crt, ca.crt). Default: ""'
+                type: string
+              nodes:
+                description: Number of nodes (pods) in the cluster
+                format: int32
+                minimum: 3
+                type: integer
+              podEnvVariables:
+                description: '(Optional) PodEnvVariables is a slice of environment
+                  variables that are added to the pods. Default: (empty list)'
+                items:
+                  description: EnvVar represents an environment variable present in
+                    a Container.
+                  properties:
+                    name:
+                      description: Name of the environment variable. Must be a C_IDENTIFIER.
+                      type: string
+                    value:
+                      description: 'Variable references $(VAR_NAME) are expanded using
+                        the previously defined environment variables in the container
+                        and any service environment variables. If a variable cannot
+                        be resolved, the reference in the input string will be unchanged.
+                        The $(VAR_NAME) syntax can be escaped with a double $$, i.e.:
+                        $$(VAR_NAME). Escaped references will never be expanded, regardless
+                        of whether the variable exists or not. Defaults to "".'
+                      type: string
+                    valueFrom:
+                      description: Source for the environment variable's value. Cannot
+                        be used if value is not empty.
+                      properties:
+                        configMapKeyRef:
+                          description: Selects a key of a ConfigMap.
+                          properties:
+                            key:
+                              description: The key to select.
+                              type: string
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                            optional:
+                              description: Specify whether the ConfigMap or its key
+                                must be defined
+                              type: boolean
+                          required:
+                          - key
+                          type: object
+                        fieldRef:
+                          description: 'Selects a field of the pod: supports metadata.name,
+                            metadata.namespace, `metadata.labels[''<KEY>'']`, `metadata.annotations[''<KEY>'']`,
+                            spec.nodeName, spec.serviceAccountName, status.hostIP,
+                            status.podIP, status.podIPs.'
+                          properties:
+                            apiVersion:
+                              description: Version of the schema the FieldPath is
+                                written in terms of, defaults to "v1".
+                              type: string
+                            fieldPath:
+                              description: Path of the field to select in the specified
+                                API version.
+                              type: string
+                          required:
+                          - fieldPath
+                          type: object
+                        resourceFieldRef:
+                          description: 'Selects a resource of the container: only
+                            resources limits and requests (limits.cpu, limits.memory,
+                            limits.ephemeral-storage, requests.cpu, requests.memory
+                            and requests.ephemeral-storage) are currently supported.'
+                          properties:
+                            containerName:
+                              description: 'Container name: required for volumes,
+                                optional for env vars'
+                              type: string
+                            divisor:
+                              anyOf:
+                              - type: integer
+                              - type: string
+                              description: Specifies the output format of the exposed
+                                resources, defaults to "1"
+                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                              x-kubernetes-int-or-string: true
+                            resource:
+                              description: 'Required: resource to select'
+                              type: string
+                          required:
+                          - resource
+                          type: object
+                        secretKeyRef:
+                          description: Selects a key of a secret in the pod's namespace
+                          properties:
+                            key:
+                              description: The key of the secret to select from.  Must
+                                be a valid secret key.
+                              type: string
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                            optional:
+                              description: Specify whether the Secret or its key must
+                                be defined
+                              type: boolean
+                          required:
+                          - key
+                          type: object
+                      type: object
+                  required:
+                  - name
+                  type: object
+                type: array
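+              # Illustrative example (variable and secret names are hypothetical):
+              #   podEnvVariables:
+              #   - name: COCKROACH_EXTRA_FLAGS
+              #     valueFrom:
+              #       secretKeyRef:
+              #         name: crdb-extra
+              #         key: flags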
+              resources:
+                description: '(Optional) Database container resource limits. Any container
+                  limits can be specified. Default: (not specified)'
+                properties:
+                  limits:
+                    additionalProperties:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
+                    description: 'Limits describes the maximum amount of compute resources
+                      allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                    type: object
+                  requests:
+                    additionalProperties:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
+                    description: 'Requests describes the minimum amount of compute
+                      resources required. If Requests is omitted for a container,
+                      it defaults to Limits if that is explicitly specified, otherwise
+                      to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                    type: object
+                type: object
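+              # Illustrative quantities accepted by the pattern above:
+              #   resources:
+              #     requests: {cpu: 500m, memory: 4Gi}
+              #     limits: {cpu: "2", memory: 4Gi}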
+              sqlPort:
+                description: '(Optional) The SQL port number. Default: 26257'
+                format: int32
+                type: integer
+              tlsEnabled:
+                description: (Optional) TLSEnabled determines if TLS is enabled for
+                  your CockroachDB Cluster
+                type: boolean
+              tolerations:
+                description: (Optional) Tolerations for scheduling pods onto some
+                  dedicated nodes
+                items:
+                  description: The pod this Toleration is attached to tolerates any
+                    taint that matches the triple <key,value,effect> using the matching
+                    operator <operator>.
+                  properties:
+                    effect:
+                      description: Effect indicates the taint effect to match. Empty
+                        means match all taint effects. When specified, allowed values
+                        are NoSchedule, PreferNoSchedule and NoExecute.
+                      type: string
+                    key:
+                      description: Key is the taint key that the toleration applies
+                        to. Empty means match all taint keys. If the key is empty,
+                        operator must be Exists; this combination means to match all
+                        values and all keys.
+                      type: string
+                    operator:
+                      description: Operator represents a key's relationship to the
+                        value. Valid operators are Exists and Equal. Defaults to Equal.
+                        Exists is equivalent to wildcard for value, so that a pod
+                        can tolerate all taints of a particular category.
+                      type: string
+                    tolerationSeconds:
+                      description: TolerationSeconds represents the period of time
+                        the toleration (which must be of effect NoExecute, otherwise
+                        this field is ignored) tolerates the taint. By default, it
+                        is not set, which means tolerate the taint forever (do not
+                        evict). Zero and negative values will be treated as 0 (evict
+                        immediately) by the system.
+                      format: int64
+                      type: integer
+                    value:
+                      description: Value is the taint value the toleration matches
+                        to. If the operator is Exists, the value should be empty,
+                        otherwise just a regular string.
+                      type: string
+                  type: object
+                type: array
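+              # Illustrative example (taint key/value are assumptions):
+              #   tolerations:
+              #   - key: dedicated
+              #     operator: Equal
+              #     value: database
+              #     effect: NoSchedule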
+              topologySpreadConstraints:
+                description: (Optional) If specified, the pod's topology spread constraints
+                items:
+                  description: TopologySpreadConstraint specifies how to spread matching
+                    pods among the given topology.
+                  properties:
+                    labelSelector:
+                      description: LabelSelector is used to find matching pods. Pods
+                        that match this label selector are counted to determine the
+                        number of pods in their corresponding topology domain.
+                      properties:
+                        matchExpressions:
+                          description: matchExpressions is a list of label selector
+                            requirements. The requirements are ANDed.
+                          items:
+                            description: A label selector requirement is a selector
+                              that contains values, a key, and an operator that relates
+                              the key and values.
+                            properties:
+                              key:
+                                description: key is the label key that the selector
+                                  applies to.
+                                type: string
+                              operator:
+                                description: operator represents a key's relationship
+                                  to a set of values. Valid operators are In, NotIn,
+                                  Exists and DoesNotExist.
+                                type: string
+                              values:
+                                description: values is an array of string values.
+                                  If the operator is In or NotIn, the values array
+                                  must be non-empty. If the operator is Exists or
+                                  DoesNotExist, the values array must be empty. This
+                                  array is replaced during a strategic merge patch.
+                                items:
+                                  type: string
+                                type: array
+                            required:
+                            - key
+                            - operator
+                            type: object
+                          type: array
+                        matchLabels:
+                          additionalProperties:
+                            type: string
+                          description: matchLabels is a map of {key,value} pairs.
+                            A single {key,value} in the matchLabels map is equivalent
+                            to an element of matchExpressions, whose key field is
+                            "key", the operator is "In", and the values array contains
+                            only "value". The requirements are ANDed.
+                          type: object
+                      type: object
+                    maxSkew:
+                      description: 'MaxSkew describes the degree to which pods may
+                        be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`,
+                        it is the maximum permitted difference between the number
+                        of matching pods in the target topology and the global minimum.
+                        For example, in a 3-zone cluster, MaxSkew is set to 1, and
+                        pods with the same labelSelector spread as 1/1/0: | zone1
+                        | zone2 | zone3 | |   P   |   P   |       | - if MaxSkew is
+                        1, incoming pod can only be scheduled to zone3 to become 1/1/1;
+                        scheduling it onto zone1(zone2) would make the ActualSkew(2-0)
+                        on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming
+                        pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`,
+                        it is used to give higher precedence to topologies that satisfy
+                        it. It''s a required field. Default value is 1 and 0 is not
+                        allowed.'
+                      format: int32
+                      type: integer
+                    topologyKey:
+                      description: TopologyKey is the key of node labels. Nodes that
+                        have a label with this key and identical values are considered
+                        to be in the same topology. We consider each <key, value>
+                        as a "bucket", and try to put balanced number of pods into
+                        each bucket. It's a required field.
+                      type: string
+                    whenUnsatisfiable:
+                      description: 'WhenUnsatisfiable indicates how to deal with a
+                        pod if it doesn''t satisfy the spread constraint. - DoNotSchedule
+                        (default) tells the scheduler not to schedule it. - ScheduleAnyway
+                        tells the scheduler to schedule the pod in any location,   but
+                        giving higher precedence to topologies that would help reduce
+                        the   skew. A constraint is considered "Unsatisfiable" for
+                        an incoming pod if and only if every possible node assignment
+                        for that pod would violate "MaxSkew" on some topology. For
+                        example, in a 3-zone cluster, MaxSkew is set to 1, and pods
+                        with the same labelSelector spread as 3/1/1: | zone1 | zone2
+                        | zone3 | | P P P |   P   |   P   | If WhenUnsatisfiable is
+                        set to DoNotSchedule, incoming pod can only be scheduled to
+                        zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on
+                        zone2(zone3) satisfies MaxSkew(1). In other words, the cluster
+                        can still be imbalanced, but scheduler won''t make it *more*
+                        imbalanced. It''s a required field.'
+                      type: string
+                  required:
+                  - maxSkew
+                  - topologyKey
+                  - whenUnsatisfiable
+                  type: object
+                type: array
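+              # Illustrative example spreading database pods across zones via the
+              # well-known topology.kubernetes.io/zone node label:
+              #   topologySpreadConstraints:
+              #   - maxSkew: 1
+              #     topologyKey: topology.kubernetes.io/zone
+              #     whenUnsatisfiable: ScheduleAnyway
+              #     labelSelector:
+              #       matchLabels:
+              #         app.kubernetes.io/name: cockroachdb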
+            required:
+            - dataStore
+            - nodes
+            type: object
+          status:
+            description: CrdbClusterStatus defines the observed state of Cluster
+            properties:
+              clusterStatus:
+                description: OperatorStatus represents the status of the operator (Failed,
+                  Starting, Running or Other)
+                type: string
+              conditions:
+                description: List of conditions representing the current status of
+                  the cluster resource.
+                items:
+                  description: ClusterCondition represents cluster status as it is
+                    perceived by the operator
+                  properties:
+                    lastTransitionTime:
+                      description: The time when the condition was updated
+                      format: date-time
+                      type: string
+                    status:
+                      description: 'Condition status: True, False or Unknown'
+                      type: string
+                    type:
+                      description: Type/Name of the condition
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - status
+                  - type
+                  type: object
+                type: array
+              crdbcontainerimage:
+                description: CrdbContainerImage is the container that will be installed
+                type: string
+              operatorActions:
+                items:
+                  description: ClusterAction represents cluster status as it is perceived
+                    by the operator
+                  properties:
+                    lastTransitionTime:
+                      description: The time when the condition was updated
+                      format: date-time
+                      type: string
+                    message:
+                      description: (Optional) Message related to the status of the
+                        action
+                      type: string
+                    status:
+                      description: 'Action status: Failed, Finished or Unknown'
+                      type: string
+                    type:
+                      description: Type/Name of the action
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - status
+                  - type
+                  type: object
+                type: array
+              sqlHost:
+                description: SQLHost is the host to be used with SQL ingress
+                type: string
+              version:
+                description: Database service version. Not populated and is just a
+                  placeholder currently.
+                type: string
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
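+# Illustrative minimal CrdbCluster (names and sizes are assumptions) covering the
+# required spec fields (dataStore, nodes); the apiVersion matches the webhook
+# paths in manifests/cockroachdb/operator.yaml:
+#   apiVersion: crdb.cockroachlabs.com/v1alpha1
+#   kind: CrdbCluster
+#   metadata:
+#     name: cockroachdb
+#   spec:
+#     nodes: 3
+#     dataStore:
+#       pvc:
+#         spec:
+#           accessModes: ["ReadWriteOnce"]
+#           resources:
+#             requests:
+#               storage: 60Gi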
diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2be72d329b48bc6f45d66f811c299140cda85e27
--- /dev/null
+++ b/manifests/cockroachdb/operator.yaml
@@ -0,0 +1,616 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: cockroach-operator
+  name: cockroach-operator-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app: cockroach-operator
+  name: cockroach-operator-sa
+  namespace: cockroach-operator-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: cockroach-operator-role
+rules:
+- apiGroups:
+  - admissionregistration.k8s.io
+  resources:
+  - mutatingwebhookconfigurations
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - admissionregistration.k8s.io
+  resources:
+  - validatingwebhookconfigurations
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets/scale
+  verbs:
+  - get
+  - update
+  - watch
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs/status
+  verbs:
+  - get
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - certificatesigningrequests
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - watch
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - certificatesigningrequests/approval
+  verbs:
+  - update
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - certificatesigningrequests/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - configmaps/status
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumeclaims
+  verbs:
+  - list
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - delete
+  - deletecollection
+  - get
+  - list
+- apiGroups:
+  - ""
+  resources:
+  - pods/exec
+  verbs:
+  - create
+- apiGroups:
+  - ""
+  resources:
+  - pods/log
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - create
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - crdb.cockroachlabs.com
+  resources:
+  - crdbclusters
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - crdb.cockroachlabs.com
+  resources:
+  - crdbclusters/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - crdb.cockroachlabs.com
+  resources:
+  - crdbclusters/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses/status
+  verbs:
+  - get
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets/status
+  verbs:
+  - get
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - rolebindings
+  verbs:
+  - create
+  - get
+  - list
+  - watch
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  verbs:
+  - create
+  - get
+  - list
+  - watch
+- apiGroups:
+  - security.openshift.io
+  resources:
+  - securitycontextconstraints
+  verbs:
+  - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cockroach-operator-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cockroach-operator-role
+subjects:
+- kind: ServiceAccount
+  name: cockroach-operator-sa
+  namespace: cockroach-operator-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    control-plane: cockroach-operator
+  name: cockroach-operator-webhook-service
+  namespace: cockroach-operator-system
+spec:
+  ports:
+  - port: 443
+    targetPort: 9443
+  selector:
+    app: cockroach-operator
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: cockroach-operator
+  name: cockroach-operator-manager
+  namespace: cockroach-operator-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: cockroach-operator
+  template:
+    metadata:
+      labels:
+        app: cockroach-operator
+    spec:
+      containers:
+      - args:
+        - -zap-log-level
+        - info
+        env:
+        - name: RELATED_IMAGE_COCKROACH_v20_1_4
+          value: cockroachdb/cockroach:v20.1.4
+        - name: RELATED_IMAGE_COCKROACH_v20_1_5
+          value: cockroachdb/cockroach:v20.1.5
+        - name: RELATED_IMAGE_COCKROACH_v20_1_8
+          value: cockroachdb/cockroach:v20.1.8
+        - name: RELATED_IMAGE_COCKROACH_v20_1_11
+          value: cockroachdb/cockroach:v20.1.11
+        - name: RELATED_IMAGE_COCKROACH_v20_1_12
+          value: cockroachdb/cockroach:v20.1.12
+        - name: RELATED_IMAGE_COCKROACH_v20_1_13
+          value: cockroachdb/cockroach:v20.1.13
+        - name: RELATED_IMAGE_COCKROACH_v20_1_15
+          value: cockroachdb/cockroach:v20.1.15
+        - name: RELATED_IMAGE_COCKROACH_v20_1_16
+          value: cockroachdb/cockroach:v20.1.16
+        - name: RELATED_IMAGE_COCKROACH_v20_1_17
+          value: cockroachdb/cockroach:v20.1.17
+        - name: RELATED_IMAGE_COCKROACH_v20_2_0
+          value: cockroachdb/cockroach:v20.2.0
+        - name: RELATED_IMAGE_COCKROACH_v20_2_1
+          value: cockroachdb/cockroach:v20.2.1
+        - name: RELATED_IMAGE_COCKROACH_v20_2_2
+          value: cockroachdb/cockroach:v20.2.2
+        - name: RELATED_IMAGE_COCKROACH_v20_2_3
+          value: cockroachdb/cockroach:v20.2.3
+        - name: RELATED_IMAGE_COCKROACH_v20_2_4
+          value: cockroachdb/cockroach:v20.2.4
+        - name: RELATED_IMAGE_COCKROACH_v20_2_5
+          value: cockroachdb/cockroach:v20.2.5
+        - name: RELATED_IMAGE_COCKROACH_v20_2_6
+          value: cockroachdb/cockroach:v20.2.6
+        - name: RELATED_IMAGE_COCKROACH_v20_2_8
+          value: cockroachdb/cockroach:v20.2.8
+        - name: RELATED_IMAGE_COCKROACH_v20_2_9
+          value: cockroachdb/cockroach:v20.2.9
+        - name: RELATED_IMAGE_COCKROACH_v20_2_10
+          value: cockroachdb/cockroach:v20.2.10
+        - name: RELATED_IMAGE_COCKROACH_v20_2_11
+          value: cockroachdb/cockroach:v20.2.11
+        - name: RELATED_IMAGE_COCKROACH_v20_2_12
+          value: cockroachdb/cockroach:v20.2.12
+        - name: RELATED_IMAGE_COCKROACH_v20_2_13
+          value: cockroachdb/cockroach:v20.2.13
+        - name: RELATED_IMAGE_COCKROACH_v20_2_14
+          value: cockroachdb/cockroach:v20.2.14
+        - name: RELATED_IMAGE_COCKROACH_v20_2_15
+          value: cockroachdb/cockroach:v20.2.15
+        - name: RELATED_IMAGE_COCKROACH_v20_2_16
+          value: cockroachdb/cockroach:v20.2.16
+        - name: RELATED_IMAGE_COCKROACH_v20_2_17
+          value: cockroachdb/cockroach:v20.2.17
+        - name: RELATED_IMAGE_COCKROACH_v20_2_18
+          value: cockroachdb/cockroach:v20.2.18
+        - name: RELATED_IMAGE_COCKROACH_v20_2_19
+          value: cockroachdb/cockroach:v20.2.19
+        - name: RELATED_IMAGE_COCKROACH_v21_1_0
+          value: cockroachdb/cockroach:v21.1.0
+        - name: RELATED_IMAGE_COCKROACH_v21_1_1
+          value: cockroachdb/cockroach:v21.1.1
+        - name: RELATED_IMAGE_COCKROACH_v21_1_2
+          value: cockroachdb/cockroach:v21.1.2
+        - name: RELATED_IMAGE_COCKROACH_v21_1_3
+          value: cockroachdb/cockroach:v21.1.3
+        - name: RELATED_IMAGE_COCKROACH_v21_1_4
+          value: cockroachdb/cockroach:v21.1.4
+        - name: RELATED_IMAGE_COCKROACH_v21_1_5
+          value: cockroachdb/cockroach:v21.1.5
+        - name: RELATED_IMAGE_COCKROACH_v21_1_6
+          value: cockroachdb/cockroach:v21.1.6
+        - name: RELATED_IMAGE_COCKROACH_v21_1_7
+          value: cockroachdb/cockroach:v21.1.7
+        - name: RELATED_IMAGE_COCKROACH_v21_1_9
+          value: cockroachdb/cockroach:v21.1.9
+        - name: RELATED_IMAGE_COCKROACH_v21_1_10
+          value: cockroachdb/cockroach:v21.1.10
+        - name: RELATED_IMAGE_COCKROACH_v21_1_11
+          value: cockroachdb/cockroach:v21.1.11
+        - name: RELATED_IMAGE_COCKROACH_v21_1_12
+          value: cockroachdb/cockroach:v21.1.12
+        - name: RELATED_IMAGE_COCKROACH_v21_1_13
+          value: cockroachdb/cockroach:v21.1.13
+        - name: RELATED_IMAGE_COCKROACH_v21_1_14
+          value: cockroachdb/cockroach:v21.1.14
+        - name: RELATED_IMAGE_COCKROACH_v21_1_15
+          value: cockroachdb/cockroach:v21.1.15
+        - name: RELATED_IMAGE_COCKROACH_v21_1_16
+          value: cockroachdb/cockroach:v21.1.16
+        - name: RELATED_IMAGE_COCKROACH_v21_1_17
+          value: cockroachdb/cockroach:v21.1.17
+        - name: RELATED_IMAGE_COCKROACH_v21_1_18
+          value: cockroachdb/cockroach:v21.1.18
+        - name: RELATED_IMAGE_COCKROACH_v21_1_19
+          value: cockroachdb/cockroach:v21.1.19
+        - name: RELATED_IMAGE_COCKROACH_v21_1_20
+          value: cockroachdb/cockroach:v21.1.20
+        - name: RELATED_IMAGE_COCKROACH_v21_1_21
+          value: cockroachdb/cockroach:v21.1.21
+        - name: RELATED_IMAGE_COCKROACH_v21_2_0
+          value: cockroachdb/cockroach:v21.2.0
+        - name: RELATED_IMAGE_COCKROACH_v21_2_1
+          value: cockroachdb/cockroach:v21.2.1
+        - name: RELATED_IMAGE_COCKROACH_v21_2_2
+          value: cockroachdb/cockroach:v21.2.2
+        - name: RELATED_IMAGE_COCKROACH_v21_2_3
+          value: cockroachdb/cockroach:v21.2.3
+        - name: RELATED_IMAGE_COCKROACH_v21_2_4
+          value: cockroachdb/cockroach:v21.2.4
+        - name: RELATED_IMAGE_COCKROACH_v21_2_5
+          value: cockroachdb/cockroach:v21.2.5
+        - name: RELATED_IMAGE_COCKROACH_v21_2_7
+          value: cockroachdb/cockroach:v21.2.7
+        - name: RELATED_IMAGE_COCKROACH_v21_2_8
+          value: cockroachdb/cockroach:v21.2.8
+        - name: RELATED_IMAGE_COCKROACH_v21_2_9
+          value: cockroachdb/cockroach:v21.2.9
+        - name: RELATED_IMAGE_COCKROACH_v21_2_10
+          value: cockroachdb/cockroach:v21.2.10
+        - name: RELATED_IMAGE_COCKROACH_v21_2_11
+          value: cockroachdb/cockroach:v21.2.11
+        - name: RELATED_IMAGE_COCKROACH_v21_2_12
+          value: cockroachdb/cockroach:v21.2.12
+        - name: RELATED_IMAGE_COCKROACH_v21_2_13
+          value: cockroachdb/cockroach:v21.2.13
+        - name: RELATED_IMAGE_COCKROACH_v21_2_14
+          value: cockroachdb/cockroach:v21.2.14
+        - name: RELATED_IMAGE_COCKROACH_v21_2_15
+          value: cockroachdb/cockroach:v21.2.15
+        - name: RELATED_IMAGE_COCKROACH_v21_2_16
+          value: cockroachdb/cockroach:v21.2.16
+        - name: RELATED_IMAGE_COCKROACH_v21_2_17
+          value: cockroachdb/cockroach:v21.2.17
+        - name: RELATED_IMAGE_COCKROACH_v22_1_0
+          value: cockroachdb/cockroach:v22.1.0
+        - name: RELATED_IMAGE_COCKROACH_v22_1_1
+          value: cockroachdb/cockroach:v22.1.1
+        - name: RELATED_IMAGE_COCKROACH_v22_1_2
+          value: cockroachdb/cockroach:v22.1.2
+        - name: RELATED_IMAGE_COCKROACH_v22_1_3
+          value: cockroachdb/cockroach:v22.1.3
+        - name: RELATED_IMAGE_COCKROACH_v22_1_4
+          value: cockroachdb/cockroach:v22.1.4
+        - name: RELATED_IMAGE_COCKROACH_v22_1_5
+          value: cockroachdb/cockroach:v22.1.5
+        - name: RELATED_IMAGE_COCKROACH_v22_1_7
+          value: cockroachdb/cockroach:v22.1.7
+        - name: RELATED_IMAGE_COCKROACH_v22_1_8
+          value: cockroachdb/cockroach:v22.1.8
+        - name: RELATED_IMAGE_COCKROACH_v22_1_10
+          value: cockroachdb/cockroach:v22.1.10
+        - name: RELATED_IMAGE_COCKROACH_v22_1_11
+          value: cockroachdb/cockroach:v22.1.11
+        - name: RELATED_IMAGE_COCKROACH_v22_1_12
+          value: cockroachdb/cockroach:v22.1.12
+        - name: RELATED_IMAGE_COCKROACH_v22_2_0
+          value: cockroachdb/cockroach:v22.2.0
+        - name: OPERATOR_NAME
+          value: cockroachdb
+        - name: WATCH_NAMESPACE
+          value: "%TFS_CRDB_NAMESPACE%"
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        image: cockroachdb/cockroach-operator:v2.9.0
+        imagePullPolicy: IfNotPresent
+        name: cockroach-operator
+        resources:
+          requests:
+            cpu: 10m
+            memory: 32Mi
+      serviceAccountName: cockroach-operator-sa
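+      # NOTE: %TFS_CRDB_NAMESPACE% in WATCH_NAMESPACE above is a template
+      # placeholder, presumably substituted by the deployment scripts before this
+      # manifest is applied; an equivalent manual step (namespace name assumed)
+      # would be:
+      #   sed "s/%TFS_CRDB_NAMESPACE%/crdb/g" manifests/cockroachdb/operator.yaml | kubectl apply -f -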
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+  creationTimestamp: null
+  name: cockroach-operator-mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: cockroach-operator-webhook-service
+      namespace: cockroach-operator-system
+      path: /mutate-crdb-cockroachlabs-com-v1alpha1-crdbcluster
+  failurePolicy: Fail
+  name: mcrdbcluster.kb.io
+  rules:
+  - apiGroups:
+    - crdb.cockroachlabs.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - crdbclusters
+  sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+  creationTimestamp: null
+  name: cockroach-operator-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: cockroach-operator-webhook-service
+      namespace: cockroach-operator-system
+      path: /validate-crdb-cockroachlabs-com-v1alpha1-crdbcluster
+  failurePolicy: Fail
+  name: vcrdbcluster.kb.io
+  rules:
+  - apiGroups:
+    - crdb.cockroachlabs.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - crdbclusters
+  sideEffects: None
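+# Illustrative deployment check (resource names taken from the manifests above):
+#   kubectl apply -f manifests/cockroachdb/operator.yaml
+#   kubectl -n cockroach-operator-system get deployment cockroach-operator-manager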
diff --git a/manifests/cockroachdb/single-node.yaml b/manifests/cockroachdb/single-node.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f207d25946e4d2283da848281c05a0c5d72b5943
--- /dev/null
+++ b/manifests/cockroachdb/single-node.yaml
@@ -0,0 +1,84 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: cockroachdb-public
+  labels:
+    app.kubernetes.io/component: database
+    app.kubernetes.io/instance: cockroachdb
+    app.kubernetes.io/name: cockroachdb
+spec:
+  type: ClusterIP
+  selector:
+    app.kubernetes.io/component: database
+    app.kubernetes.io/instance: cockroachdb
+    app.kubernetes.io/name: cockroachdb
+  ports:
+  - name: http
+    port: 8080
+    protocol: TCP
+    targetPort: 8080
+  - name: sql
+    port: 26257
+    protocol: TCP
+    targetPort: 26257
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: cockroachdb
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/component: database
+      app.kubernetes.io/instance: cockroachdb
+      app.kubernetes.io/name: cockroachdb
+  serviceName: "cockroachdb-public"
+  replicas: 1
+  minReadySeconds: 5
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/component: database
+        app.kubernetes.io/instance: cockroachdb
+        app.kubernetes.io/name: cockroachdb
+    spec:
+      terminationGracePeriodSeconds: 10
+      restartPolicy: Always
+      containers:
+      - name: cockroachdb
+        image: cockroachdb/cockroach:latest-v22.2
+        args:
+        - start-single-node
+        ports:
+        - containerPort: 8080
+          name: http
+        - containerPort: 26257
+          name: sql
+        env:
+        - name: COCKROACH_DATABASE
+          value: "%CRDB_DATABASE%"
+        - name: COCKROACH_USER
+          value: "%CRDB_USERNAME%"
+        - name: COCKROACH_PASSWORD
+          value: "%CRDB_PASSWORD%"
+        resources:
+          requests:
+            cpu: "250m"
+            memory: 1Gi
+          limits:
+            cpu: "1"
+            memory: 2Gi
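+# Illustrative local access once the pod is Ready (service name from above):
+#   kubectl port-forward svc/cockroachdb-public 8080:8080 26257:26257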
diff --git a/manifests/computeservice.yaml b/manifests/computeservice.yaml
index 0c8d0a6724a49f4dec0f903570ff04e49cb4e793..89a4a39e5c1e9971b3ea496dbfa1a6595066b7f9 100644
--- a/manifests/computeservice.yaml
+++ b/manifests/computeservice.yaml
@@ -28,7 +28,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/compute:latest
+        image: labs.etsi.org:5050/tfs/controller/compute:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 8080
diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml
index 2998640327864c1e9c2f6782a5adf252eb6673a7..805808d5d2829ceebf3217f2b39ea331c62d19d5 100644
--- a/manifests/contextservice.yaml
+++ b/manifests/contextservice.yaml
@@ -28,35 +28,22 @@ spec:
     spec:
       terminationGracePeriodSeconds: 5
       containers:
-      - name: redis
-        image: redis:6.2
-        ports:
-        - containerPort: 6379
-        resources:
-          requests:
-            cpu: 100m
-            memory: 128Mi
-          limits:
-            cpu: 500m
-            memory: 1024Mi
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/context:latest
+        image: labs.etsi.org:5050/tfs/controller/context:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 1010
-        - containerPort: 8080
         - containerPort: 9192
         env:
-        - name: DB_BACKEND
-          value: "redis"
         - name: MB_BACKEND
-          value: "redis"
-        - name: REDIS_DATABASE_ID
-          value: "0"
+          value: "nats"
         - name: LOG_LEVEL
           value: "INFO"
-        - name: POPULATE_FAKE_DATA
-          value: "false"
+        envFrom:
+        - secretRef:
+            name: crdb-data
+        - secretRef:
+            name: nats-data
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:1010"]
@@ -86,10 +73,6 @@ spec:
     protocol: TCP
     port: 1010
     targetPort: 1010
-  - name: http
-    protocol: TCP
-    port: 8080
-    targetPort: 8080
   - name: metrics
     protocol: TCP
     port: 9192
diff --git a/manifests/dbscanservingservice.yaml b/manifests/dbscanservingservice.yaml
index 9553ed556bddaa437d89881f0c4220ae6e418239..e1f73a237c2a2a4d78b58778e83d88a0d3516eae 100644
--- a/manifests/dbscanservingservice.yaml
+++ b/manifests/dbscanservingservice.yaml
@@ -28,7 +28,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/dbscanserving:latest
+        image: labs.etsi.org:5050/tfs/controller/dbscanserving:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 10006
diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml
index 83daa41f3c0cdf8e84b02dfc0ad18d8f7644e57b..3580df088176d4ec3c7a249ff4b8ae609082928a 100644
--- a/manifests/deviceservice.yaml
+++ b/manifests/deviceservice.yaml
@@ -29,7 +29,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/device:latest
+        image: labs.etsi.org:5050/tfs/controller/device:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 2020
diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml
index 0f6b5bb9df1ccfc6057c0746058da6754233376a..c067960b791cc4211118904dc60c23dbe8ab013b 100644
--- a/manifests/dltservice.yaml
+++ b/manifests/dltservice.yaml
@@ -28,7 +28,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: connector
-        image: registry.gitlab.com/teraflow-h2020/controller/dlt-connector:latest
+        image: labs.etsi.org:5050/tfs/controller/dlt-connector:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 8080
@@ -55,7 +55,7 @@ spec:
             cpu: 500m
             memory: 512Mi
       - name: gateway
-        image: registry.gitlab.com/teraflow-h2020/controller/dlt-gateway:latest
+        image: labs.etsi.org:5050/tfs/controller/dlt-gateway:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 50051
diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml
index b275035f62c68eeb8d28f1892909650ca10defee..b214343614626936b8c43bfaef6310ba21fed3cf 100644
--- a/manifests/interdomainservice.yaml
+++ b/manifests/interdomainservice.yaml
@@ -28,7 +28,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/interdomain:latest
+        image: labs.etsi.org:5050/tfs/controller/interdomain:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 10010
diff --git a/manifests/l3_attackmitigatorservice.yaml b/manifests/l3_attackmitigatorservice.yaml
index 2240776ebb2e234b58febe9520a4b9e07d42b6d4..592143089c326c6bf06eaad982f725bacc48717d 100644
--- a/manifests/l3_attackmitigatorservice.yaml
+++ b/manifests/l3_attackmitigatorservice.yaml
@@ -28,7 +28,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/l3_attackmitigator:latest
+        image: labs.etsi.org:5050/tfs/controller/l3_attackmitigator:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 10002
diff --git a/manifests/l3_centralizedattackdetectorservice.yaml b/manifests/l3_centralizedattackdetectorservice.yaml
index fa7ee9dccd99982d35d7f7705e463ecee30c7c9b..8672cab9554a5def10c46f75935cea9c45c03529 100644
--- a/manifests/l3_centralizedattackdetectorservice.yaml
+++ b/manifests/l3_centralizedattackdetectorservice.yaml
@@ -28,7 +28,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/l3_centralizedattackdetector:latest
+        image: labs.etsi.org:5050/tfs/controller/l3_centralizedattackdetector:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 10001
diff --git a/manifests/l3_distributedattackdetectorservice.yaml b/manifests/l3_distributedattackdetectorservice.yaml
index 6b28f68dd5e08561eb29e4512af330b26f6408cf..8765b7171c174b9cf4a0ad8854dc29b61839114f 100644
--- a/manifests/l3_distributedattackdetectorservice.yaml
+++ b/manifests/l3_distributedattackdetectorservice.yaml
@@ -28,7 +28,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/l3_distributedattackdetector:latest
+        image: labs.etsi.org:5050/tfs/controller/l3_distributedattackdetector:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 10000
diff --git a/manifests/load_generatorservice.yaml b/manifests/load_generatorservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4d7b32d1bc6e608bdcecf290367aff8ad729a19e
--- /dev/null
+++ b/manifests/load_generatorservice.yaml
@@ -0,0 +1,67 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: load-generatorservice
+spec:
+  selector:
+    matchLabels:
+      app: load-generatorservice
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: load-generatorservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+      - name: server
+        image: labs.etsi.org:5050/tfs/controller/load_generator:latest
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 50052
+        env:
+        - name: LOG_LEVEL
+          value: "INFO"
+        readinessProbe:
+          exec:
+            command: ["/bin/grpc_health_probe", "-addr=:50052"]
+        livenessProbe:
+          exec:
+            command: ["/bin/grpc_health_probe", "-addr=:50052"]
+        resources:
+          requests:
+            cpu: 50m
+            memory: 64Mi
+          limits:
+            cpu: 500m
+            memory: 512Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: load-generatorservice
+  labels:
+    app: load-generatorservice
+spec:
+  type: ClusterIP
+  selector:
+    app: load-generatorservice
+  ports:
+  - name: grpc
+    protocol: TCP
+    port: 50052
+    targetPort: 50052
diff --git a/manifests/mock_blockchain.yaml b/manifests/mock_blockchain.yaml
index bf9abac703b263ad6a843f0d70848dde94a4ab97..17b32a47ed1a4b61bf83a12344a8ef1d6e2c336e 100644
--- a/manifests/mock_blockchain.yaml
+++ b/manifests/mock_blockchain.yaml
@@ -28,7 +28,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/mock_blockchain:latest
+        image: labs.etsi.org:5050/tfs/controller/mock_blockchain:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 50051
diff --git a/manifests/monitoringservice.yaml b/manifests/monitoringservice.yaml
index aed8d1c51e5e84abec11dcc272c786b208dd9556..b5f3042ba068153856d42f15c174a80ebdc6f266 100644
--- a/manifests/monitoringservice.yaml
+++ b/manifests/monitoringservice.yaml
@@ -66,7 +66,7 @@ spec:
       restartPolicy: Always
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/monitoring:latest
+        image: labs.etsi.org:5050/tfs/controller/monitoring:latest
         imagePullPolicy: Always
         ports:
         - name: grpc
diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml
index 50ff81c79eaa02647562456809226d1aed847204..fe262d3283a6d857097ad161bb36f9b37b33a7c3 100644
--- a/manifests/nginx_ingress_http.yaml
+++ b/manifests/nginx_ingress_http.yaml
@@ -22,13 +22,13 @@ spec:
               name: webuiservice
               port:
                 number: 3000
-        - path: /context(/|$)(.*)
-          pathType: Prefix
-          backend:
-            service:
-              name: contextservice
-              port:
-                number: 8080
+        #- path: /context(/|$)(.*)
+        #  pathType: Prefix
+        #  backend:
+        #    service:
+        #      name: contextservice
+        #      port:
+        #        number: 8080
         - path: /()(restconf/.*)
           pathType: Prefix
           backend:
diff --git a/manifests/opticalattackmitigatorservice.yaml b/manifests/opticalattackmitigatorservice.yaml
index afe2e4069fbae2fd3b5300da614b4deb5d785fab..0252eec219306a22f4fba7c4509639a41fc9803e 100644
--- a/manifests/opticalattackmitigatorservice.yaml
+++ b/manifests/opticalattackmitigatorservice.yaml
@@ -28,7 +28,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/opticalattackmitigator:latest
+        image: labs.etsi.org:5050/tfs/controller/opticalattackmitigator:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 10007
diff --git a/manifests/opticalcentralizedattackdetectorservice.yaml b/manifests/opticalcentralizedattackdetectorservice.yaml
index 664bcb54348e533ff40c7f882b5668f727a39053..4a49f8b13c09617caaf3c170ee265e9166802a71 100644
--- a/manifests/opticalcentralizedattackdetectorservice.yaml
+++ b/manifests/opticalcentralizedattackdetectorservice.yaml
@@ -28,7 +28,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/opticalcentralizedattackdetector:latest
+        image: labs.etsi.org:5050/tfs/controller/opticalcentralizedattackdetector:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 10005
diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml
index 71c927b567316bb118ff085f1727abd03e92c0d7..4f7a65c45bb0902f1f9800c0bd3613a3b25b440a 100644
--- a/manifests/pathcompservice.yaml
+++ b/manifests/pathcompservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: pathcompservice
-  replicas: 5
+  replicas: 1
   template:
     metadata:
       labels:
@@ -29,7 +29,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: frontend
-        image: registry.gitlab.com/teraflow-h2020/controller/pathcomp-frontend:latest
+        image: labs.etsi.org:5050/tfs/controller/pathcomp-frontend:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 10020
@@ -51,7 +51,7 @@ spec:
             cpu: 500m
             memory: 512Mi
       - name: backend
-        image: registry.gitlab.com/teraflow-h2020/controller/pathcomp-backend:latest
+        image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest
         imagePullPolicy: Always
         #readinessProbe:
         #  httpGet:
diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml
index 089be20f969509c9d5f12922a6cd991acd2f3bc0..02c9e25db23d0ddd237fc7b978e9615c65aa10e9 100644
--- a/manifests/serviceservice.yaml
+++ b/manifests/serviceservice.yaml
@@ -20,7 +20,7 @@ spec:
   selector:
     matchLabels:
       app: serviceservice
-  replicas: 5
+  replicas: 1
   template:
     metadata:
       labels:
@@ -29,7 +29,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/service:latest
+        image: labs.etsi.org:5050/tfs/controller/service:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 3030
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index ff4b41fe7c709acf0d58c9c73b9f6198104a89fd..56c5eb1b5301481a64d1b660d80a1ba02ff232c1 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -29,7 +29,7 @@ spec:
       terminationGracePeriodSeconds: 5
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/slice:latest
+        image: labs.etsi.org:5050/tfs/controller/slice:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 4040
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index 7f70e837c4b6b979477a3a02db6e744b41387d73..d0a64871a654e91c91bac9e2782b978fc821cea8 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -32,7 +32,7 @@ spec:
           - 0
       containers:
       - name: server
-        image: registry.gitlab.com/teraflow-h2020/controller/webui:latest
+        image: labs.etsi.org:5050/tfs/controller/webui:latest
         imagePullPolicy: Always
         ports:
         - containerPort: 8004
diff --git a/my_deploy.sh b/my_deploy.sh
index ffd91da35186fe21f418950493ef797a9af1b522..644904f8bd635c00477c6cec3e70665da199c86a 100644
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -1,18 +1,31 @@
-# Set the URL of your local Docker registry where the images will be uploaded to.
-export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-# Supported components are:
-#   context device automation policy service compute monitoring webui
-#   interdomain slice pathcomp dlt
-#   dbscanserving opticalattackmitigator opticalattackdetector
-#   l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
-export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
+export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui load_generator"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
-# Set the name of the Kubernetes namespace to deploy to.
+# Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs"
 
 # Set additional manifest files to be applied after the deployment
@@ -21,6 +34,35 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
 
-# If not already set, disable skip-build flag.
-# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
-export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+# ----- CockroachDB ------------------------------------------------------------
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Disable flag for dropping database, if exists.
+export CRDB_DROP_DATABASE_IF_EXISTS=""
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+# ----- NATS -------------------------------------------------------------------
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
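
Note: my_deploy.sh is meant to be sourced so the TFS_*, CRDB_* and NATS_* variables reach the deployment entry point. A minimal usage sketch, assuming the repository's deploy/all.sh script referenced above:

    cd ~/tfs-ctrl          # adjust to your working-tree location
    source my_deploy.sh    # export the deployment settings defined above
    ./deploy/all.sh        # build images and deploy CockroachDB, NATS and TFS
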
diff --git a/proto/context.proto b/proto/context.proto
index 3f0532d231535c2e59c798cbc9a6b1c92e1eb4bf..ce7534c806aecd4e1b43fd1c8a39772c1f1a7b9c 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -101,9 +101,11 @@ message ContextId {
 
 message Context {
   ContextId context_id = 1;
-  repeated TopologyId topology_ids = 2;
-  repeated ServiceId service_ids = 3;
-  TeraFlowController controller = 4;
+  string name = 2;
+  repeated TopologyId topology_ids = 3;
+  repeated ServiceId service_ids = 4;
+  repeated SliceId slice_ids = 5;
+  TeraFlowController controller = 6;
 }
 
 message ContextIdList {
@@ -128,8 +130,9 @@ message TopologyId {
 
 message Topology {
   TopologyId topology_id = 1;
-  repeated DeviceId device_ids = 2;
-  repeated LinkId link_ids = 3;
+  string name = 2;
+  repeated DeviceId device_ids = 3;
+  repeated LinkId link_ids = 4;
 }
 
 message TopologyIdList {
@@ -153,12 +156,13 @@ message DeviceId {
 
 message Device {
   DeviceId device_id = 1;
-  string device_type = 2;
-  DeviceConfig device_config = 3;
-  DeviceOperationalStatusEnum device_operational_status = 4;
-  repeated DeviceDriverEnum device_drivers = 5;
-  repeated EndPoint device_endpoints = 6;
-  repeated Component component = 7; // Used for inventory
+  string name = 2;
+  string device_type = 3;
+  DeviceConfig device_config = 4;
+  DeviceOperationalStatusEnum device_operational_status = 5;
+  repeated DeviceDriverEnum device_drivers = 6;
+  repeated EndPoint device_endpoints = 7;
+  repeated Component component = 8; // Used for inventory
 }
 
 message Component {
@@ -207,7 +211,8 @@ message LinkId {
 
 message Link {
   LinkId link_id = 1;
-  repeated EndPointId link_endpoint_ids = 2;
+  string name = 2;
+  repeated EndPointId link_endpoint_ids = 3;
 }
 
 message LinkIdList {
@@ -232,12 +237,13 @@ message ServiceId {
 
 message Service {
   ServiceId service_id = 1;
-  ServiceTypeEnum service_type = 2;
-  repeated EndPointId service_endpoint_ids = 3;
-  repeated Constraint service_constraints = 4;
-  ServiceStatus service_status = 5;
-  ServiceConfig service_config = 6;
-  Timestamp timestamp = 7;
+  string name = 2;
+  ServiceTypeEnum service_type = 3;
+  repeated EndPointId service_endpoint_ids = 4;
+  repeated Constraint service_constraints = 5;
+  ServiceStatus service_status = 6;
+  ServiceConfig service_config = 7;
+  Timestamp timestamp = 8;
 }
 
 enum ServiceTypeEnum {
@@ -284,14 +290,15 @@ message SliceId {
 
 message Slice {
   SliceId slice_id = 1;
-  repeated EndPointId slice_endpoint_ids = 2;
-  repeated Constraint slice_constraints = 3;
-  repeated ServiceId slice_service_ids = 4;
-  repeated SliceId slice_subslice_ids = 5;
-  SliceStatus slice_status = 6;
-  SliceConfig slice_config = 7;
-  SliceOwner slice_owner = 8;
-  Timestamp timestamp = 9;
+  string name = 2;
+  repeated EndPointId slice_endpoint_ids = 3;
+  repeated Constraint slice_constraints = 4;
+  repeated ServiceId slice_service_ids = 5;
+  repeated SliceId slice_subslice_ids = 6;
+  SliceStatus slice_status = 7;
+  SliceConfig slice_config = 8;
+  SliceOwner slice_owner = 9;
+  Timestamp timestamp = 10;
 }
 
 message SliceOwner {
@@ -300,11 +307,11 @@ message SliceOwner {
 }
 
 enum SliceStatusEnum {
-  SLICESTATUS_UNDEFINED = 0;
-  SLICESTATUS_PLANNED   = 1;
-  SLICESTATUS_INIT      = 2;
-  SLICESTATUS_ACTIVE    = 3;
-  SLICESTATUS_DEINIT    = 4;
+  SLICESTATUS_UNDEFINED    = 0;
+  SLICESTATUS_PLANNED      = 1;
+  SLICESTATUS_INIT         = 2;
+  SLICESTATUS_ACTIVE       = 3;
+  SLICESTATUS_DEINIT       = 4;
   SLICESTATUS_SLA_VIOLATED = 5;
 }
 
@@ -400,17 +407,18 @@ message EndPointId {
 
 message EndPoint {
   EndPointId endpoint_id = 1;
-  string endpoint_type = 2;
-  repeated kpi_sample_types.KpiSampleType kpi_sample_types = 3;
-  Location endpoint_location = 4;
+  string name = 2;
+  string endpoint_type = 3;
+  repeated kpi_sample_types.KpiSampleType kpi_sample_types = 4;
+  Location endpoint_location = 5;
 }
 
 
 // ----- Configuration -------------------------------------------------------------------------------------------------
 enum ConfigActionEnum {
   CONFIGACTION_UNDEFINED = 0;
-  CONFIGACTION_SET = 1;
-  CONFIGACTION_DELETE = 2;
+  CONFIGACTION_SET       = 1;
+  CONFIGACTION_DELETE    = 2;
 }
 
 message ConfigRule_Custom {
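
Note: every first-class object (Context, Topology, Device, Link, Service, Slice, EndPoint) gains a human-readable name field, and the subsequent field numbers shift by one; this is wire-incompatible with stubs generated from the previous revision, so all clients must be regenerated. A minimal sketch of populating the new field, assuming regenerated Python stubs under common.proto as used elsewhere in this repository:

    from common.proto.context_pb2 import Context

    context = Context()
    context.context_id.context_uuid.uuid = 'admin'   # identifier (UUID)
    context.name = 'admin'                           # new human-readable name
    print(context)
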
diff --git a/proto/load_generator.proto b/proto/load_generator.proto
new file mode 100644
index 0000000000000000000000000000000000000000..00ddb254cd4bdb9e947906f477a408ece079269e
--- /dev/null
+++ b/proto/load_generator.proto
@@ -0,0 +1,23 @@
+// Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package load_generator;
+
+import "context.proto";
+
+service LoadGeneratorService {
+  rpc Start(context.Empty) returns (context.Empty) {}
+  rpc Stop (context.Empty) returns (context.Empty) {}
+}
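
Note: a minimal client sketch for the new LoadGeneratorService, assuming the generated stubs live under common.proto like the other services and that port 50052 (see manifests/load_generatorservice.yaml) has been forwarded to localhost:

    import grpc
    from common.proto.context_pb2 import Empty
    from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceStub

    with grpc.insecure_channel('localhost:50052') as channel:
        stub = LoadGeneratorServiceStub(channel)
        stub.Start(Empty())   # begin generating requests
        # ... let the generator run for a while ...
        stub.Stop(Empty())
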
diff --git a/report_coverage_slice.sh b/report_coverage_slice.sh
deleted file mode 100755
index f783ec069329a9efe100154a2702a72a93e0ad8a..0000000000000000000000000000000000000000
--- a/report_coverage_slice.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-./report_coverage_all.sh | grep --color -E -i "^slice/.*$|$"
diff --git a/scripts/cockroachdb_client.sh b/scripts/cockroachdb_client.sh
new file mode 100755
index 0000000000000000000000000000000000000000..edd9794465e0afa5acc743a3b4375040b793857d
--- /dev/null
+++ b/scripts/cockroachdb_client.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
+
+# If not already set, set the database username to be used by Context.
+export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"}
+
+# If not already set, set the database user's password to be used by Context.
+export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
+
+# If not already set, set the database name to be used by Context.
+export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
+
+# If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
+export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
+
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
+    CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+    CRDB_CLIENT_URL="postgresql://${CRDB_USERNAME}:${CRDB_PASSWORD}@cockroachdb-0:${CRDB_SQL_PORT}/defaultdb?sslmode=require"
+    kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
+        ./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL}
+elif [ "$CRDB_DEPLOY_MODE" == "cluster" ]; then
+    kubectl exec -it --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \
+        ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public
+else
+    echo "Unsupported value: CRDB_DEPLOY_MODE=$CRDB_DEPLOY_MODE"
+fi
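
Note: usage sketch for the new client script; with no variables set it targets the single-node deployment configured in my_deploy.sh:

    ./scripts/cockroachdb_client.sh                              # 'single' mode (default)
    CRDB_DEPLOY_MODE="cluster" ./scripts/cockroachdb_client.sh   # multi-node mode
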
diff --git a/scripts/old/deploy_in_kubernetes.sh b/scripts/old/deploy_in_kubernetes.sh
index 89f45a5484f95f065f6656249f3fb04bf507a782..c85354137462e259de787df46ec2d1b8f40331c6 100755
--- a/scripts/old/deploy_in_kubernetes.sh
+++ b/scripts/old/deploy_in_kubernetes.sh
@@ -43,7 +43,7 @@ export EXTRA_MANIFESTS=${EXTRA_MANIFESTS:-""}
 ########################################################################################################################
 
 # Constants
-GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
+GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
 TMP_FOLDER="./tmp"
 
 # Create a tmp folder for files modified during the deployment
diff --git a/scripts/run_tests_locally-context.sh b/scripts/run_tests_locally-context.sh
index 7033fcb01a468731b498708096a80fac8d9a9a85..9d29ac5873c03f93ee52da1d32ffd8d6450fed3b 100755
--- a/scripts/run_tests_locally-context.sh
+++ b/scripts/run_tests_locally-context.sh
@@ -13,31 +13,73 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-########################################################################################################################
-# Define your deployment settings here
-########################################################################################################################
+PROJECTDIR=`pwd`
 
-# If not already set, set the name of the Kubernetes namespace to deploy to.
-export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+COVERAGEFILE=$PROJECTDIR/coverage/.coverage
 
-export TFS_K8S_HOSTNAME="tfs-vm"
+# Destroy old coverage file and configure the correct folder on the .coveragerc file
+rm -f $COVERAGEFILE
+cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/tfs-ctrl+$PROJECTDIR+g > $RCFILE
 
-########################################################################################################################
-# Automated steps start here
-########################################################################################################################
+echo
+echo "Pre-test clean-up:"
+echo "------------------"
+docker rm -f crdb nats
+docker volume rm -f crdb
+docker network rm tfs-br
 
-PROJECTDIR=`pwd`
+echo
+echo "Pull Docker images:"
+echo "-------------------"
+docker pull cockroachdb/cockroach:latest-v22.2
+docker pull nats:2.9
 
-cd $PROJECTDIR/src
-RCFILE=$PROJECTDIR/coverage/.coveragerc
+echo
+echo "Create test environment:"
+echo "------------------------"
+docker network create -d bridge --subnet=172.254.254.0/24 --gateway=172.254.254.1 --ip-range=172.254.254.0/24 tfs-br
+docker volume create crdb
+docker run --name crdb -d --network=tfs-br --ip 172.254.254.10 -p 26257:26257 -p 8080:8080 \
+    --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123 \
+    --volume "crdb:/cockroach/cockroach-data" \
+    cockroachdb/cockroach:latest-v22.2 start-single-node
+docker run --name nats -d --network=tfs-br --ip 172.254.254.11 -p 4222:4222 -p 8222:8222 \
+    nats:2.9 --http_port 8222 --user tfs --pass tfs123
 
-kubectl --namespace $TFS_K8S_NAMESPACE expose deployment contextservice --name=redis-tests --port=6379 --type=NodePort
-#export REDIS_SERVICE_HOST=$(kubectl --namespace $TFS_K8S_NAMESPACE get service redis-tests -o 'jsonpath={.spec.clusterIP}')
-export REDIS_SERVICE_HOST=$(kubectl get node $TFS_K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')
-export REDIS_SERVICE_PORT=$(kubectl --namespace $TFS_K8S_NAMESPACE get service redis-tests -o 'jsonpath={.spec.ports[?(@.port==6379)].nodePort}')
+echo
+echo "Waiting for initialization..."
+echo "-----------------------------"
+#docker logs -f crdb 2>&1 | grep --max-count=1 'finished creating default user "tfs"'
+while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
+docker logs crdb
+#docker logs -f nats 2>&1 | grep --max-count=1 'Server is ready'
+while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
+docker logs nats
+#sleep 10
+docker ps -a
 
-# Run unitary tests and analyze coverage of code at same time
+echo
+echo "Run unitary tests and analyze code coverage:"
+echo "--------------------------------------------"
+export CRDB_URI="cockroachdb://tfs:tfs123@172.254.254.10:26257/tfs_test?sslmode=require"
+export MB_BACKEND="nats"
+export NATS_URI="nats://tfs:tfs123@172.254.254.11:4222"
+export PYTHONPATH=$PROJECTDIR/src
+# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose --maxfail=1 \
-    context/tests/test_unitary.py
+    context/tests/test_*.py
+
+echo
+echo "Coverage report:"
+echo "----------------"
+#coverage report --rcfile=$RCFILE --sort cover --show-missing --skip-covered | grep --color -E -i "^context/.*$|$"
+coverage report --rcfile=$RCFILE --sort cover --show-missing --skip-covered --include="context/*"
 
-kubectl --namespace $TFS_K8S_NAMESPACE delete service redis-tests
+echo
+echo "Post-test clean-up:"
+echo "-------------------"
+docker rm -f crdb nats
+docker volume rm -f crdb
+docker network rm tfs-br
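
Note: while the tests run, the disposable backends can be checked from the host through the ports mapped above; a quick sanity sketch, assuming NATS's standard /varz monitoring endpoint and CockroachDB's standard /health endpoint:

    curl -s  http://127.0.0.1:8222/varz | head -n 5    # NATS monitoring data
    curl -sk https://127.0.0.1:8080/health             # CockroachDB health check
    docker ps --filter name=crdb --filter name=nats    # both containers running?
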
diff --git a/scripts/run_tests_locally.sh b/scripts/run_tests_locally.sh
index 1d48cc1af18629874b0275b1fa92bf31961741c3..486107994f85dc4cfb3ddd2d717815c0193e43c4 100755
--- a/scripts/run_tests_locally.sh
+++ b/scripts/run_tests_locally.sh
@@ -54,7 +54,7 @@ rm -f $COVERAGEFILE
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     common/orm/tests/test_unitary.py \
     common/message_broker/tests/test_unitary.py \
-    common/rpc_method_wrapper/tests/test_unitary.py
+    common/method_wrappers/tests/test_unitary.py
 
 coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
     context/tests/test_unitary.py
diff --git a/src/automation/.gitlab-ci.yml b/src/automation/.gitlab-ci.yml
index 87d141d5bef56c8d3fbd5bb5a0a961ac12598dd6..9c66a17983d5f547d2ffb9b32f4270c0201cad69 100644
--- a/src/automation/.gitlab-ci.yml
+++ b/src/automation/.gitlab-ci.yml
@@ -79,22 +79,22 @@ unit_test automation:
         - manifests/${IMAGE_NAME}service.yaml
         - .gitlab-ci.yml
 
-# Deployment of automation service in Kubernetes Cluster
-deploy automation:
-  stage: deploy
-  needs:
-    - build automation
-    - unit_test automation
-  script:
-    - kubectl version
-    - kubectl get all
-    - kubectl delete --ignore-not-found=true -f "manifests/automationservice.yaml"
-    - kubectl apply -f "manifests/automationservice.yaml"
-    - kubectl delete pods --selector app=automationservice
-    - kubectl get all
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-      when: manual
-    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
-      when: manual
+## Deployment of automation service in Kubernetes Cluster
+#deploy automation:
+#  stage: deploy
+#  needs:
+#    - build automation
+#    - unit_test automation
+#  script:
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl delete --ignore-not-found=true -f "manifests/automationservice.yaml"
+#    - kubectl apply -f "manifests/automationservice.yaml"
+#    - kubectl delete pods --selector app=automationservice
+#    - kubectl get all
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
 
diff --git a/src/automation/src/main/resources/application.yml b/src/automation/src/main/resources/application.yml
index bc89d4348dfdfc5dc3f9159fb167d00509a8e4dc..62cf8fc8ec394bb374817c9153eb9b4552af28ba 100644
--- a/src/automation/src/main/resources/application.yml
+++ b/src/automation/src/main/resources/application.yml
@@ -33,9 +33,9 @@ quarkus:
     port: 8080
 
   container-image:
-    group: teraflow-h2020
+    group: tfs
     name: controller/automation
-    registry: registry.gitlab.com
+    registry: labs.etsi.org:5050
 
   kubernetes:
     name: automationservice
diff --git a/src/automation/target/kubernetes/kubernetes.yml b/src/automation/target/kubernetes/kubernetes.yml
index 8bc14b935b4e4f4a18ed03f10cca0b74f480dcf0..f4f1c7dae70109792339c54b6dc7c3c9fdec786a 100644
--- a/src/automation/target/kubernetes/kubernetes.yml
+++ b/src/automation/target/kubernetes/kubernetes.yml
@@ -52,7 +52,7 @@ spec:
               value: contextservice
             - name: DEVICE_SERVICE_HOST
               value: deviceservice
-          image: registry.gitlab.com/teraflow-h2020/controller/automation:0.2.0
+          image: labs.etsi.org:5050/tfs/controller/automation:0.2.0
           imagePullPolicy: Always
           livenessProbe:
             failureThreshold: 3
diff --git a/src/common/Constants.py b/src/common/Constants.py
index ffdfbc4e03adaa272ce5b841ea44923409df5cbe..c26409f27bc1fc809e2c7ece8949ac643314ac17 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -21,7 +21,7 @@ DEFAULT_LOG_LEVEL = logging.WARNING
 # Default gRPC server settings
 DEFAULT_GRPC_BIND_ADDRESS = '0.0.0.0'
 DEFAULT_GRPC_MAX_WORKERS  = 200
-DEFAULT_GRPC_GRACE_PERIOD = 60
+DEFAULT_GRPC_GRACE_PERIOD = 10
 
 # Default HTTP server settings
 DEFAULT_HTTP_BIND_ADDRESS = '0.0.0.0'
@@ -30,9 +30,9 @@ DEFAULT_HTTP_BIND_ADDRESS = '0.0.0.0'
 DEFAULT_METRICS_PORT = 9192
 
 # Default context and topology UUIDs
-DEFAULT_CONTEXT_UUID      = 'admin'
-DEFAULT_TOPOLOGY_UUID     = 'admin'     # contains the detailed local topology
-INTERDOMAIN_TOPOLOGY_UUID = 'inter'     # contains the abstract inter-domain topology
+DEFAULT_CONTEXT_NAME      = 'admin'
+DEFAULT_TOPOLOGY_NAME     = 'admin'     # contains the detailed local topology
+INTERDOMAIN_TOPOLOGY_NAME = 'inter'     # contains the abstract inter-domain topology
 
 # Default service names
 class ServiceNameEnum(Enum):
@@ -51,7 +51,8 @@ class ServiceNameEnum(Enum):
     WEBUI         = 'webui'
 
     # Used for test and debugging only
-    DLT_GATEWAY   = 'dltgateway'
+    DLT_GATEWAY    = 'dltgateway'
+    LOAD_GENERATOR = 'load-generator'
 
 # Default gRPC service ports
 DEFAULT_SERVICE_GRPC_PORTS = {
@@ -69,7 +70,8 @@ DEFAULT_SERVICE_GRPC_PORTS = {
     ServiceNameEnum.PATHCOMP     .value : 10020,
 
     # Used for test and debugging only
-    ServiceNameEnum.DLT_GATEWAY  .value : 50051,
+    ServiceNameEnum.DLT_GATEWAY   .value : 50051,
+    ServiceNameEnum.LOAD_GENERATOR.value : 50052,
 }
 
 # Default HTTP/REST-API service ports
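
Note: a small sketch resolving the default gRPC endpoint of the new load-generator component from these constants:

    from common.Constants import ServiceNameEnum, DEFAULT_SERVICE_GRPC_PORTS

    service_name = ServiceNameEnum.LOAD_GENERATOR.value       # 'load-generator'
    grpc_port    = DEFAULT_SERVICE_GRPC_PORTS[service_name]   # 50052
    print('{:s} listens on gRPC port {:d}'.format(service_name, grpc_port))
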
diff --git a/src/common/message_broker/Factory.py b/src/common/message_broker/Factory.py
index a64913df02805dabc9c1924ea16966e43454b7b5..e601187061ea22946823c24ccf538886ac11bd78 100644
--- a/src/common/message_broker/Factory.py
+++ b/src/common/message_broker/Factory.py
@@ -17,13 +17,15 @@ from typing import Optional, Union
 from .backend._Backend import _Backend
 from .backend.BackendEnum import BackendEnum
 from .backend.inmemory.InMemoryBackend import InMemoryBackend
-from .backend.redis.RedisBackend import RedisBackend
+from .backend.nats.NatsBackend import NatsBackend
+#from .backend.redis.RedisBackend import RedisBackend
 
 LOGGER = logging.getLogger(__name__)
 
 BACKENDS = {
     BackendEnum.INMEMORY.value: InMemoryBackend,
-    BackendEnum.REDIS.value: RedisBackend,
+    BackendEnum.NATS.value: NatsBackend,
+    #BackendEnum.REDIS.value: RedisBackend,
     #BackendEnum.KAFKA.value: KafkaBackend,
     #BackendEnum.RABBITMQ.value: RabbitMQBackend,
     #BackendEnum.ZEROMQ.value: ZeroMQBackend,
diff --git a/src/common/message_broker/backend/BackendEnum.py b/src/common/message_broker/backend/BackendEnum.py
index bf95f176479fb227503dd04a9dde2b81789ec006..05dde81977702b65cbbdd477351744d92b722d1a 100644
--- a/src/common/message_broker/backend/BackendEnum.py
+++ b/src/common/message_broker/backend/BackendEnum.py
@@ -16,7 +16,8 @@ from enum import Enum
 
 class BackendEnum(Enum):
     INMEMORY = 'inmemory'
-    REDIS = 'redis'
+    NATS = 'nats'
+    #REDIS = 'redis'
     #KAFKA = 'kafka'
     #RABBITMQ = 'rabbitmq'
     #ZEROMQ = 'zeromq'
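
Note: a sketch instantiating the new NATS backend through the BACKENDS registry above, assuming a reachable NATS server; NATS_URI is one of the settings consumed by NatsBackend (see the new file below):

    from common.message_broker.Factory import BACKENDS
    from common.message_broker.backend.BackendEnum import BackendEnum

    backend_class = BACKENDS[BackendEnum.NATS.value]
    backend = backend_class(NATS_URI='nats://tfs:tfs123@127.0.0.1:4222')
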
diff --git a/src/common/message_broker/backend/nats/NatsBackend.py b/src/common/message_broker/backend/nats/NatsBackend.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c644a0a815cb737211af1e00db5828da0120db5
--- /dev/null
+++ b/src/common/message_broker/backend/nats/NatsBackend.py
@@ -0,0 +1,58 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import queue, threading
+from typing import Iterator, Set, Tuple
+from common.Settings import get_setting
+from common.message_broker.Message import Message
+from .._Backend import _Backend
+from .NatsBackendThread import NatsBackendThread
+
+DEFAULT_NATS_URI = 'nats://127.0.0.1:4222'
+#NATS_URI_TEMPLATE = 'nats://{:s}:{:s}@nats.{:s}.svc.cluster.local:{:s}' # with authentication
+NATS_URI_TEMPLATE = 'nats://nats.{:s}.svc.cluster.local:{:s}'
+
+class NatsBackend(_Backend):
+    def __init__(self, **settings) -> None: # pylint: disable=super-init-not-called
+        nats_namespace   = get_setting('NATS_NAMESPACE', settings=settings)
+        nats_client_port = get_setting('NATS_CLIENT_PORT', settings=settings)
+        if nats_namespace is None or nats_client_port is None:
+            nats_uri = get_setting('NATS_URI', settings=settings, default=DEFAULT_NATS_URI)
+        else:
+            nats_uri = NATS_URI_TEMPLATE.format(nats_namespace, nats_client_port)
+        self._terminate = threading.Event()
+        self._nats_backend_thread = NatsBackendThread(nats_uri)
+        self._nats_backend_thread.start()
+
+    def terminate(self) -> None:
+        self._terminate.set()
+        self._nats_backend_thread.terminate()
+        self._nats_backend_thread.join()
+
+    def publish(self, topic_name : str, message_content : str) -> None:
+        self._nats_backend_thread.publish(topic_name, message_content)
+
+    def consume(self, topic_names : Set[str], consume_timeout : float) -> Iterator[Tuple[str, str]]:
+        out_queue = queue.Queue[Message]()
+        unsubscribe = threading.Event()
+        tasks = []
+        for topic_name in topic_names:
+            tasks.append(self._nats_backend_thread.subscribe(topic_name, consume_timeout, out_queue, unsubscribe))
+        while not self._terminate.is_set():
+            try:
+                yield out_queue.get(block=True, timeout=consume_timeout)
+            except queue.Empty:
+                continue
+        unsubscribe.set()
+        for task in tasks: task.cancel()
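
Note: a usage sketch for NatsBackend; consume() blocks until terminate() is called, so it is typically driven from a separate thread:

    import threading, time
    from common.message_broker.backend.nats.NatsBackend import NatsBackend

    backend = NatsBackend(NATS_URI='nats://127.0.0.1:4222')

    def consumer() -> None:
        for topic, content in backend.consume({'device'}, consume_timeout=0.5):
            print('received on {:s}: {:s}'.format(topic, content))

    threading.Thread(target=consumer, daemon=True).start()
    time.sleep(1.0)   # give the subscriber time to connect (NATS does not buffer)
    backend.publish('device', '{"device_uuid": "dev-1"}')
    time.sleep(1.0)
    backend.terminate()
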
diff --git a/src/common/message_broker/backend/nats/NatsBackendThread.py b/src/common/message_broker/backend/nats/NatsBackendThread.py
new file mode 100644
index 0000000000000000000000000000000000000000..801cc361edab054a71d4c52add3370cc495382ef
--- /dev/null
+++ b/src/common/message_broker/backend/nats/NatsBackendThread.py
@@ -0,0 +1,77 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import asyncio, nats, nats.errors, queue, threading
+from typing import List
+from common.message_broker.Message import Message
+
+class NatsBackendThread(threading.Thread):
+    def __init__(self, nats_uri : str) -> None:
+        self._nats_uri = nats_uri
+        self._event_loop = asyncio.get_event_loop()
+        self._terminate = asyncio.Event()
+        self._tasks_terminated = asyncio.Event()
+        self._publish_queue = asyncio.Queue[Message]()
+        self._tasks : List[asyncio.Task] = list()
+        super().__init__()
+
+    def terminate(self) -> None:
+        self._terminate.set()
+        for task in self._tasks: task.cancel()
+        self._tasks_terminated.set()
+
+    async def _run_publisher(self) -> None:
+        client = await nats.connect(servers=[self._nats_uri])
+        while not self._terminate.is_set():
+            try:
+                message : Message = await self._publish_queue.get()
+            except asyncio.CancelledError:
+                break
+            await client.publish(message.topic, message.content.encode('UTF-8'))
+        await client.drain()
+
+    def publish(self, topic_name : str, message_content : str) -> None:
+        self._publish_queue.put_nowait(Message(topic_name, message_content))
+
+    async def _run_subscriber(
+        self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event
+    ) -> None:
+        client = await nats.connect(servers=[self._nats_uri])
+        subscription = await client.subscribe(topic_name)
+        while not self._terminate.is_set() and not unsubscribe.is_set():
+            try:
+                message = await subscription.next_msg(timeout)
+            except nats.errors.TimeoutError:
+                continue
+            except asyncio.CancelledError:
+                break
+            out_queue.put(Message(message.subject, message.data.decode('UTF-8')))
+        await subscription.unsubscribe()
+        await client.drain()
+
+    def subscribe(
+        self, topic_name : str, timeout : float, out_queue : queue.Queue[Message], unsubscribe : threading.Event
+    ) -> asyncio.Task:
+        # Return the task so callers (e.g., NatsBackend.consume) can cancel it on shutdown.
+        task = self._event_loop.create_task(self._run_subscriber(topic_name, timeout, out_queue, unsubscribe))
+        self._tasks.append(task)
+        return task
+
+    def run(self) -> None:
+        asyncio.set_event_loop(self._event_loop)
+        task = self._event_loop.create_task(self._run_publisher())
+        self._tasks.append(task)
+        self._event_loop.run_until_complete(self._terminate.wait())
+        self._tasks.remove(task)
+        self._event_loop.run_until_complete(self._tasks_terminated.wait())
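
Note: publish() above hands a Message to an asyncio.Queue from the caller's thread, while the queue belongs to the backend thread's event loop; asyncio does not guarantee that to be thread-safe (terminate() has the same caveat). A more defensive variant, as a sketch, marshals the call onto the loop:

    from common.message_broker.Message import Message
    from common.message_broker.backend.nats.NatsBackendThread import NatsBackendThread

    class ThreadSafeNatsBackendThread(NatsBackendThread):
        def publish(self, topic_name : str, message_content : str) -> None:
            # call_soon_threadsafe() schedules put_nowait() inside the loop thread
            self._event_loop.call_soon_threadsafe(
                self._publish_queue.put_nowait, Message(topic_name, message_content))
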
diff --git a/src/context/service/grpc_server/__init__.py b/src/common/message_broker/backend/nats/__init__.py
similarity index 100%
rename from src/context/service/grpc_server/__init__.py
rename to src/common/message_broker/backend/nats/__init__.py
diff --git a/src/common/method_wrappers/Decorator.py b/src/common/method_wrappers/Decorator.py
index 7ee2a919e10f25104d0fa77caaf8bafa11c2b30f..f918b845827951def858e0a9f5981724b0c56640 100644
--- a/src/common/method_wrappers/Decorator.py
+++ b/src/common/method_wrappers/Decorator.py
@@ -12,9 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging, threading
+import grpc, json, logging, threading
 from enum import Enum
-from typing import Dict, Tuple
+from prettytable import PrettyTable
+from typing import Any, Dict, List, Set, Tuple
 from prometheus_client import Counter, Histogram
 from prometheus_client.metrics import MetricWrapperBase, INF
 from common.tools.grpc.Tools import grpc_message_to_json_string
@@ -83,6 +84,75 @@ class MetricsPool:
 
         return histogram_duration, counter_started, counter_completed, counter_failed
 
+    def get_pretty_table(self, remove_empty_buckets : bool = True) -> PrettyTable:
+        with MetricsPool.lock:
+            method_to_metric_fields : Dict[str, Dict[str, Dict[str, Any]]] = dict()
+            bucket_bounds : Set[str] = set()
+            for raw_metric_name,raw_metric_data in MetricsPool.metrics.items():
+                if '_COUNTER_' in raw_metric_name:
+                    method_name,metric_name = raw_metric_name.split('_COUNTER_')
+                elif '_HISTOGRAM_' in raw_metric_name:
+                    method_name,metric_name = raw_metric_name.split('_HISTOGRAM_')
+                else:
+                    raise Exception('Unsupported metric: {:s}'.format(raw_metric_name)) # pragma: no cover
+                metric_data = method_to_metric_fields.setdefault(method_name, dict()).setdefault(metric_name, dict())
+                for field_name,labels,value,_,_ in raw_metric_data._child_samples():
+                    if field_name == '_bucket': bucket_bounds.add(labels['le'])
+                    if len(labels) > 0: field_name = '{:s}:{:s}'.format(field_name, json.dumps(labels, sort_keys=True))
+                    metric_data[field_name] = value
+            #print('method_to_metric_fields', method_to_metric_fields)
+
+            def sort_stats_key(item : List) -> float:
+                str_duration = str(item[0])
+                if str_duration == '---': return 0.0
+                return float(str_duration.replace(' ms', ''))
+
+            field_names = ['Method', 'TOT', 'OK', 'ERR', 'avg(Dur)']
+            bucket_bounds = sorted(bucket_bounds, key=float) # sort the bucket bounds numerically
+            bucket_column_names = ['<={:s}'.format(bucket_bound) for bucket_bound in bucket_bounds]
+            field_names.extend(bucket_column_names)
+
+            pt_stats = PrettyTable(
+                field_names=field_names, sortby='avg(Dur)', sort_key=sort_stats_key, reversesort=True)
+            for f in field_names: pt_stats.align[f] = 'r'
+            for f in ['Method']: pt_stats.align[f] = 'l'
+
+            for method_name,metrics in method_to_metric_fields.items():
+                counter_started_value = int(metrics['REQUESTS_STARTED']['_total'])
+                if counter_started_value == 0:
+                    #pt_stats.add_row([method_name, '---', '---', '---', '---'])
+                    continue
+                counter_completed_value = int(metrics['REQUESTS_COMPLETED']['_total'])
+                counter_failed_value = int(metrics['REQUESTS_FAILED']['_total'])
+                duration_count_value = float(metrics['DURATION']['_count'])
+                duration_sum_value = float(metrics['DURATION']['_sum'])
+                duration_avg_value = duration_sum_value/duration_count_value
+
+                row = [
+                    method_name, str(counter_started_value), str(counter_completed_value), str(counter_failed_value),
+                    '{:.3f} ms'.format(1000.0 * duration_avg_value),
+                ]
+
+                total_count = 0
+                for bucket_bound in bucket_bounds:
+                    labels = json.dumps({"le": bucket_bound}, sort_keys=True)
+                    bucket_name = '_bucket:{:s}'.format(labels)
+                    accumulated_count = int(metrics['DURATION'][bucket_name])
+                    bucket_count = accumulated_count - total_count
+                    row.append(str(bucket_count) if bucket_count > 0 else '')
+                    total_count = accumulated_count
+
+                pt_stats.add_row(row)
+
+            if remove_empty_buckets:
+                for bucket_column_name in bucket_column_names:
+                    col_index = pt_stats._field_names.index(bucket_column_name)
+                    num_non_empties = sum([1 for row in pt_stats._rows if len(row[col_index]) > 0])
+                    if num_non_empties > 0: continue
+                    pt_stats.del_column(bucket_column_name)
+
+            return pt_stats
+
 def metered_subclass_method(metrics_pool : MetricsPool):
     def outer_wrapper(func):
         metrics = metrics_pool.get_metrics(func.__name__)
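
Note: a sketch dumping the statistics collected by a MetricsPool as a table; METRICS_POOL is hypothetical here and stands for whatever pool instance a servicer already created (its constructor arguments are not shown in this hunk):

    # Reuse the pool your component defines; empty histogram buckets are dropped.
    print(METRICS_POOL.get_pretty_table(remove_empty_buckets=True))
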
diff --git a/src/common/method_wrappers/ServiceExceptions.py b/src/common/method_wrappers/ServiceExceptions.py
index e8d5c79acca19117fca53ec216166c01d3f0781d..369565cf8d76975426174ba35ec14140526e8a39 100644
--- a/src/common/method_wrappers/ServiceExceptions.py
+++ b/src/common/method_wrappers/ServiceExceptions.py
@@ -13,54 +13,56 @@
 # limitations under the License.
 
 import grpc
-from typing import Iterable, Union
+from typing import Iterable, List, Tuple, Union
 
 class ServiceException(Exception):
     def __init__(
         self, code : grpc.StatusCode, details : str, extra_details : Union[str, Iterable[str]] = []
-        ) -> None:
-
+    ) -> None:
         self.code = code
         if isinstance(extra_details, str): extra_details = [extra_details]
-        self.details = '; '.join(map(str, [details] + extra_details))
+        self.details = '; '.join([str(item) for item in ([details] + extra_details)])
         super().__init__(self.details)
 
 class NotFoundException(ServiceException):
     def __init__(
         self, object_name : str, object_uuid: str, extra_details : Union[str, Iterable[str]] = []
-        ) -> None:
-
+    ) -> None:
         details = '{:s}({:s}) not found'.format(str(object_name), str(object_uuid))
         super().__init__(grpc.StatusCode.NOT_FOUND, details, extra_details=extra_details)
 
 class AlreadyExistsException(ServiceException):
     def __init__(
         self, object_name : str, object_uuid: str, extra_details : Union[str, Iterable[str]] = None
-        ) -> None:
-
+    ) -> None:
         details = '{:s}({:s}) already exists'.format(str(object_name), str(object_uuid))
         super().__init__(grpc.StatusCode.ALREADY_EXISTS, details, extra_details=extra_details)
 
 class InvalidArgumentException(ServiceException):
     def __init__(
         self, argument_name : str, argument_value: str, extra_details : Union[str, Iterable[str]] = None
-        ) -> None:
-
+    ) -> None:
         details = '{:s}({:s}) is invalid'.format(str(argument_name), str(argument_value))
         super().__init__(grpc.StatusCode.INVALID_ARGUMENT, details, extra_details=extra_details)
 
+class InvalidArgumentsException(ServiceException):
+    def __init__(
+        self, arguments : List[Tuple[str, str]], extra_details : Union[str, Iterable[str]] = None
+    ) -> None:
+        str_arguments = ', '.join(['{:s}({:s})'.format(name, value) for name,value in arguments])
+        details = 'Arguments {:s} are invalid'.format(str_arguments)
+        super().__init__(grpc.StatusCode.INVALID_ARGUMENT, details, extra_details=extra_details)
+
 class OperationFailedException(ServiceException):
     def __init__(
         self, operation : str, extra_details : Union[str, Iterable[str]] = None
-        ) -> None:
-
+    ) -> None:
         details = 'Operation({:s}) failed'.format(str(operation))
         super().__init__(grpc.StatusCode.INTERNAL, details, extra_details=extra_details)
 
 class NotImplementedException(ServiceException):
     def __init__(
         self, operation : str, extra_details : Union[str, Iterable[str]] = None
-        ) -> None:
-
+    ) -> None:
         details = 'Operation({:s}) not implemented'.format(str(operation))
         super().__init__(grpc.StatusCode.UNIMPLEMENTED, details, extra_details=extra_details)
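
Note: the new InvalidArgumentsException reports several invalid arguments in a single gRPC INVALID_ARGUMENT error; a minimal usage sketch:

    from common.method_wrappers.ServiceExceptions import InvalidArgumentsException

    raise InvalidArgumentsException([
        ('context_uuid',  ''),
        ('topology_uuid', 'not-a-uuid'),
    ], extra_details='context_uuid cannot be empty')
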
diff --git a/src/common/method_wrappers/tests/deploy_specs.sh b/src/common/method_wrappers/tests/deploy_specs.sh
index 238918480ae857e64efb52f652b20ab08a21c2df..a5af70b04a84ffa83b0e19da005175f0291c4f93 100644
--- a/src/common/method_wrappers/tests/deploy_specs.sh
+++ b/src/common/method_wrappers/tests/deploy_specs.sh
@@ -7,7 +7,7 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
 #   interdomain slice pathcomp dlt
 #   dbscanserving opticalattackmitigator opticalattackdetector
 #   l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
-export TFS_COMPONENTS="context device pathcomp service slice webui" # automation monitoring compute
+export TFS_COMPONENTS="context device pathcomp service slice webui load_generator" # automation monitoring compute dlt
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
diff --git a/src/common/tests/MockMessageBroker.py b/src/common/tests/MockMessageBroker.py
index 851c06766fd705bee746840f3d4ce9c4f4ac404d..563903b980a9375ea13acf152b99544195767e91 100644
--- a/src/common/tests/MockMessageBroker.py
+++ b/src/common/tests/MockMessageBroker.py
@@ -15,9 +15,24 @@
 import json, logging, threading, time
 from queue import Queue, Empty
 from typing import Dict, Iterator, NamedTuple, Set
+from common.proto.context_pb2 import EventTypeEnum
 
 LOGGER = logging.getLogger(__name__)
-CONSUME_TIMEOUT = 0.1 # seconds
+
+TOPIC_CONNECTION = 'connection'
+TOPIC_CONTEXT    = 'context'
+TOPIC_DEVICE     = 'device'
+TOPIC_LINK       = 'link'
+TOPIC_POLICY     = 'policy'
+TOPIC_SERVICE    = 'service'
+TOPIC_SLICE      = 'slice'
+TOPIC_TOPOLOGY   = 'topology'
+
+TOPICS = {
+    TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY
+}
+
+CONSUME_TIMEOUT = 0.5 # seconds
 
 class Message(NamedTuple):
     topic: str
@@ -54,8 +69,10 @@ class MockMessageBroker:
     def terminate(self):
         self._terminate.set()
 
-def notify_event(messagebroker, topic_name, event_type, fields) -> None:
-    event = {'event': {'timestamp': time.time(), 'event_type': event_type}}
+def notify_event(
+    messagebroker : MockMessageBroker, topic_name : str, event_type : EventTypeEnum, fields : Dict[str, str]
+) -> None:
+    event = {'event': {'timestamp': {'timestamp': time.time()}, 'event_type': event_type}}
     for field_name, field_value in fields.items():
         event[field_name] = field_value
     messagebroker.publish(Message(topic_name, json.dumps(event)))
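
Note: a sketch emitting a device-creation event through the mock broker, using the topic constants and the typed signature introduced above (EVENTTYPE_CREATE is assumed to be one of the values of EventTypeEnum in context.proto):

    from common.proto.context_pb2 import EventTypeEnum
    from common.tests.MockMessageBroker import MockMessageBroker, TOPIC_DEVICE, notify_event

    broker = MockMessageBroker()
    notify_event(broker, TOPIC_DEVICE, EventTypeEnum.EVENTTYPE_CREATE, {'device_id': 'dev-1'})
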
diff --git a/src/common/tests/MockServicerImpl_Context.py b/src/common/tests/MockServicerImpl_Context.py
index 27ff45fc58c675fe28090a186059244e2f1178c1..f33f25dc10fb90d043d64820dbf426afb4ad1785 100644
--- a/src/common/tests/MockServicerImpl_Context.py
+++ b/src/common/tests/MockServicerImpl_Context.py
@@ -24,19 +24,13 @@ from common.proto.context_pb2 import (
     Slice, SliceEvent, SliceId, SliceIdList, SliceList,
     Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
 from common.proto.context_pb2_grpc import ContextServiceServicer
-from common.tests.MockMessageBroker import MockMessageBroker, notify_event
+from common.tests.MockMessageBroker import (
+    TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY,
+    MockMessageBroker, notify_event)
 from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
 
 LOGGER = logging.getLogger(__name__)
 
-TOPIC_CONNECTION = 'connection'
-TOPIC_CONTEXT    = 'context'
-TOPIC_TOPOLOGY   = 'topology'
-TOPIC_DEVICE     = 'device'
-TOPIC_LINK       = 'link'
-TOPIC_SERVICE    = 'service'
-TOPIC_SLICE      = 'slice'
-
 def get_container(database : Dict[str, Dict[str, Any]], container_name : str) -> Dict[str, Any]:
     return database.setdefault(container_name, {})
 
@@ -103,23 +97,33 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList:
         LOGGER.info('[ListContextIds] request={:s}'.format(grpc_message_to_json_string(request)))
-        return ContextIdList(context_ids=[context.context_id for context in get_entries(self.database, 'context')])
+        reply = ContextIdList(context_ids=[context.context_id for context in get_entries(self.database, 'context')])
+        LOGGER.info('[ListContextIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList:
         LOGGER.info('[ListContexts] request={:s}'.format(grpc_message_to_json_string(request)))
-        return ContextList(contexts=get_entries(self.database, 'context'))
+        reply = ContextList(contexts=get_entries(self.database, 'context'))
+        LOGGER.info('[ListContexts] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context:
         LOGGER.info('[GetContext] request={:s}'.format(grpc_message_to_json_string(request)))
-        return get_entry(context, self.database, 'context', request.context_uuid.uuid)
+        reply = get_entry(context, self.database, 'context', request.context_uuid.uuid)
+        LOGGER.info('[GetContext] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId:
         LOGGER.info('[SetContext] request={:s}'.format(grpc_message_to_json_string(request)))
-        return self._set(request, 'context', request.context_id.context_uuid.uuid, 'context_id', TOPIC_CONTEXT)
+        reply = self._set(request, 'context', request.context_id.context_uuid.uuid, 'context_id', TOPIC_CONTEXT)
+        LOGGER.info('[SetContext] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveContext] request={:s}'.format(grpc_message_to_json_string(request)))
-        return self._del(request, 'context', request.context_uuid.uuid, 'context_id', TOPIC_CONTEXT, context)
+        reply = self._del(request, 'context', request.context_uuid.uuid, 'context_id', TOPIC_CONTEXT, context)
+        LOGGER.info('[RemoveContext] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]:
         LOGGER.info('[GetContextEvents] request={:s}'.format(grpc_message_to_json_string(request)))
@@ -131,29 +135,39 @@ class MockServicerImpl_Context(ContextServiceServicer):
     def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList:
         LOGGER.info('[ListTopologyIds] request={:s}'.format(grpc_message_to_json_string(request)))
         topologies = get_entries(self.database, 'topology[{:s}]'.format(str(request.context_uuid.uuid)))
-        return TopologyIdList(topology_ids=[topology.topology_id for topology in topologies])
+        reply = TopologyIdList(topology_ids=[topology.topology_id for topology in topologies])
+        LOGGER.info('[ListTopologyIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList:
         LOGGER.info('[ListTopologies] request={:s}'.format(grpc_message_to_json_string(request)))
         topologies = get_entries(self.database, 'topology[{:s}]'.format(str(request.context_uuid.uuid)))
-        return TopologyList(topologies=[topology for topology in topologies])
+        reply = TopologyList(topologies=[topology for topology in topologies])
+        LOGGER.info('[ListTopologies] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology:
         LOGGER.info('[GetTopology] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'topology[{:s}]'.format(str(request.context_id.context_uuid.uuid))
-        return get_entry(context, self.database, container_name, request.topology_uuid.uuid)
+        reply = get_entry(context, self.database, container_name, request.topology_uuid.uuid)
+        LOGGER.info('[GetTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId:
         LOGGER.info('[SetTopology] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'topology[{:s}]'.format(str(request.topology_id.context_id.context_uuid.uuid))
         topology_uuid = request.topology_id.topology_uuid.uuid
-        return self._set(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY)
+        reply = self._set(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY)
+        LOGGER.info('[SetTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveTopology] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'topology[{:s}]'.format(str(request.context_id.context_uuid.uuid))
         topology_uuid = request.topology_uuid.uuid
-        return self._del(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY, context)
+        reply = self._del(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY, context)
+        LOGGER.info('[RemoveTopology] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]:
         LOGGER.info('[GetTopologyEvents] request={:s}'.format(grpc_message_to_json_string(request)))
@@ -164,23 +178,33 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList:
         LOGGER.info('[ListDeviceIds] request={:s}'.format(grpc_message_to_json_string(request)))
-        return DeviceIdList(device_ids=[device.device_id for device in get_entries(self.database, 'device')])
+        reply = DeviceIdList(device_ids=[device.device_id for device in get_entries(self.database, 'device')])
+        LOGGER.info('[ListDeviceIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList:
         LOGGER.info('[ListDevices] request={:s}'.format(grpc_message_to_json_string(request)))
-        return DeviceList(devices=get_entries(self.database, 'device'))
+        reply = DeviceList(devices=get_entries(self.database, 'device'))
+        LOGGER.info('[ListDevices] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device:
         LOGGER.info('[GetDevice] request={:s}'.format(grpc_message_to_json_string(request)))
-        return get_entry(context, self.database, 'device', request.device_uuid.uuid)
+        reply = get_entry(context, self.database, 'device', request.device_uuid.uuid)
+        LOGGER.info('[GetDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId:
         LOGGER.info('[SetDevice] request={:s}'.format(grpc_message_to_json_string(request)))
-        return self._set(request, 'device', request.device_id.device_uuid.uuid, 'device_id', TOPIC_DEVICE)
+        reply = self._set(request, 'device', request.device_id.device_uuid.uuid, 'device_id', TOPIC_DEVICE)
+        LOGGER.info('[SetDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveDevice] request={:s}'.format(grpc_message_to_json_string(request)))
-        return self._del(request, 'device', request.device_uuid.uuid, 'device_id', TOPIC_DEVICE, context)
+        reply = self._del(request, 'device', request.device_uuid.uuid, 'device_id', TOPIC_DEVICE, context)
+        LOGGER.info('[RemoveDevice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
         LOGGER.info('[GetDeviceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
@@ -191,23 +215,33 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList:
         LOGGER.info('[ListLinkIds] request={:s}'.format(grpc_message_to_json_string(request)))
-        return LinkIdList(link_ids=[link.link_id for link in get_entries(self.database, 'link')])
+        reply = LinkIdList(link_ids=[link.link_id for link in get_entries(self.database, 'link')])
+        LOGGER.info('[ListLinkIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList:
         LOGGER.info('[ListLinks] request={:s}'.format(grpc_message_to_json_string(request)))
-        return LinkList(links=get_entries(self.database, 'link'))
+        reply = LinkList(links=get_entries(self.database, 'link'))
+        LOGGER.info('[ListLinks] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link:
         LOGGER.info('[GetLink] request={:s}'.format(grpc_message_to_json_string(request)))
-        return get_entry(context, self.database, 'link', request.link_uuid.uuid)
+        reply = get_entry(context, self.database, 'link', request.link_uuid.uuid)
+        LOGGER.info('[GetLink] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def SetLink(self, request: Link, context : grpc.ServicerContext) -> LinkId:
         LOGGER.info('[SetLink] request={:s}'.format(grpc_message_to_json_string(request)))
-        return self._set(request, 'link', request.link_id.link_uuid.uuid, 'link_id', TOPIC_LINK)
+        reply = self._set(request, 'link', request.link_id.link_uuid.uuid, 'link_id', TOPIC_LINK)
+        LOGGER.info('[SetLink] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveLink] request={:s}'.format(grpc_message_to_json_string(request)))
-        return self._del(request, 'link', request.link_uuid.uuid, 'link_id', TOPIC_LINK, context)
+        reply = self._del(request, 'link', request.link_uuid.uuid, 'link_id', TOPIC_LINK, context)
+        LOGGER.info('[RemoveLink] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]:
         LOGGER.info('[GetLinkEvents] request={:s}'.format(grpc_message_to_json_string(request)))
@@ -219,29 +253,39 @@ class MockServicerImpl_Context(ContextServiceServicer):
     def ListSliceIds(self, request: ContextId, context : grpc.ServicerContext) -> SliceIdList:
         LOGGER.info('[ListSliceIds] request={:s}'.format(grpc_message_to_json_string(request)))
         slices = get_entries(self.database, 'slice[{:s}]'.format(str(request.context_uuid.uuid)))
-        return SliceIdList(slice_ids=[slice.slice_id for slice in slices])
+        reply = SliceIdList(slice_ids=[slice.slice_id for slice in slices])
+        LOGGER.info('[ListSliceIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def ListSlices(self, request: ContextId, context : grpc.ServicerContext) -> SliceList:
         LOGGER.info('[ListSlices] request={:s}'.format(grpc_message_to_json_string(request)))
         slices = get_entries(self.database, 'slice[{:s}]'.format(str(request.context_uuid.uuid)))
-        return SliceList(slices=[slice for slice in slices])
+        reply = SliceList(slices=[slice for slice in slices])
+        LOGGER.info('[ListSlices] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetSlice(self, request: SliceId, context : grpc.ServicerContext) -> Slice:
         LOGGER.info('[GetSlice] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'slice[{:s}]'.format(str(request.context_id.context_uuid.uuid))
-        return get_entry(context, self.database, container_name, request.slice_uuid.uuid)
+        reply = get_entry(context, self.database, container_name, request.slice_uuid.uuid)
+        LOGGER.info('[GetSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId:
         LOGGER.info('[SetSlice] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'slice[{:s}]'.format(str(request.slice_id.context_id.context_uuid.uuid))
         slice_uuid = request.slice_id.slice_uuid.uuid
-        return self._set(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE)
+        reply = self._set(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE)
+        LOGGER.info('[SetSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveSlice] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'slice[{:s}]'.format(str(request.context_id.context_uuid.uuid))
         slice_uuid = request.slice_uuid.uuid
-        return self._del(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE, context)
+        reply = self._del(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE, context)
+        LOGGER.info('[RemoveSlice] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]:
         LOGGER.info('[GetSliceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
@@ -253,29 +297,39 @@ class MockServicerImpl_Context(ContextServiceServicer):
     def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList:
         LOGGER.info('[ListServiceIds] request={:s}'.format(grpc_message_to_json_string(request)))
         services = get_entries(self.database, 'service[{:s}]'.format(str(request.context_uuid.uuid)))
-        return ServiceIdList(service_ids=[service.service_id for service in services])
+        reply = ServiceIdList(service_ids=[service.service_id for service in services])
+        LOGGER.info('[ListServiceIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList:
         LOGGER.info('[ListServices] request={:s}'.format(grpc_message_to_json_string(request)))
         services = get_entries(self.database, 'service[{:s}]'.format(str(request.context_uuid.uuid)))
-        return ServiceList(services=[service for service in services])
+        reply = ServiceList(services=[service for service in services])
+        LOGGER.info('[ListServices] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service:
         LOGGER.info('[GetService] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'service[{:s}]'.format(str(request.context_id.context_uuid.uuid))
-        return get_entry(context, self.database, container_name, request.service_uuid.uuid)
+        reply = get_entry(context, self.database, container_name, request.service_uuid.uuid)
+        LOGGER.info('[GetService] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId:
         LOGGER.info('[SetService] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'service[{:s}]'.format(str(request.service_id.context_id.context_uuid.uuid))
         service_uuid = request.service_id.service_uuid.uuid
-        return self._set(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE)
+        reply = self._set(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE)
+        LOGGER.info('[SetService] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveService] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'service[{:s}]'.format(str(request.context_id.context_uuid.uuid))
         service_uuid = request.service_uuid.uuid
-        return self._del(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE, context)
+        reply = self._del(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE, context)
+        LOGGER.info('[RemoveService] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
         LOGGER.info('[GetServiceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
@@ -288,17 +342,23 @@ class MockServicerImpl_Context(ContextServiceServicer):
         LOGGER.info('[ListConnectionIds] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'service_connections[{:s}/{:s}]'.format(
             str(request.context_id.context_uuid.uuid), str(request.service_uuid.uuid))
-        return ConnectionIdList(connection_ids=[c.connection_id for c in get_entries(self.database, container_name)])
+        reply = ConnectionIdList(connection_ids=[c.connection_id for c in get_entries(self.database, container_name)])
+        LOGGER.info('[ListConnectionIds] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def ListConnections(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionList:
         LOGGER.info('[ListConnections] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'service_connections[{:s}/{:s}]'.format(
             str(request.context_id.context_uuid.uuid), str(request.service_uuid.uuid))
-        return ConnectionList(connections=get_entries(self.database, container_name))
+        reply = ConnectionList(connections=get_entries(self.database, container_name))
+        LOGGER.info('[ListConnections] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Connection:
         LOGGER.info('[GetConnection] request={:s}'.format(grpc_message_to_json_string(request)))
-        return get_entry(context, self.database, 'connection', request.connection_uuid.uuid)
+        reply = get_entry(context, self.database, 'connection', request.connection_uuid.uuid)
+        LOGGER.info('[GetConnection] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId:
         LOGGER.info('[SetConnection] request={:s}'.format(grpc_message_to_json_string(request)))
@@ -306,7 +366,9 @@ class MockServicerImpl_Context(ContextServiceServicer):
             str(request.service_id.context_id.context_uuid.uuid), str(request.service_id.service_uuid.uuid))
         connection_uuid = request.connection_id.connection_uuid.uuid
         set_entry(self.database, container_name, connection_uuid, request)
-        return self._set(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION)
+        reply = self._set(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION)
+        LOGGER.info('[SetConnection] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveConnection] request={:s}'.format(grpc_message_to_json_string(request)))
@@ -315,7 +377,9 @@ class MockServicerImpl_Context(ContextServiceServicer):
             str(connection.service_id.context_id.context_uuid.uuid), str(connection.service_id.service_uuid.uuid))
         connection_uuid = request.connection_uuid.uuid
         del_entry(context, self.database, container_name, connection_uuid)
-        return self._del(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION, context)
+        reply = self._del(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION, context)
+        LOGGER.info('[RemoveConnection] reply={:s}'.format(grpc_message_to_json_string(reply)))
+        return reply
 
     def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]:
         LOGGER.info('[GetConnectionEvents] request={:s}'.format(grpc_message_to_json_string(request)))
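The request/reply logging added throughout this servicer repeats the same three lines per RPC. A decorator such as the following (a sketch, not part of the patch) would capture the pattern once; the streaming `Get*Events` methods would keep their inline logging, since their replies are yielded lazily:

```python
# Hypothetical helper: logs request and reply around a unary servicer method,
# mirroring the pattern used in MockServicerImpl_Context above.
import functools, logging

from common.tools.grpc.Tools import grpc_message_to_json_string

LOGGER = logging.getLogger(__name__)

def log_request_reply(method):
    @functools.wraps(method)
    def wrapper(self, request, context):
        name = method.__name__
        LOGGER.info('[{:s}] request={:s}'.format(name, grpc_message_to_json_string(request)))
        reply = method(self, request, context)
        LOGGER.info('[{:s}] reply={:s}'.format(name, grpc_message_to_json_string(reply)))
        return reply
    return wrapper
```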
diff --git a/src/common/tools/context_queries/Device.py b/src/common/tools/context_queries/Device.py
index e5b205d46185e12fa51a2cbd8146342abe5bed38..ed8772cf65b1fef0d950c6de3be4e178fe5fb472 100644
--- a/src/common/tools/context_queries/Device.py
+++ b/src/common/tools/context_queries/Device.py
@@ -12,11 +12,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List, Set
-from common.proto.context_pb2 import ContextId, Device, Empty, Topology, TopologyId
+import grpc, logging
+from typing import List, Optional, Set
+from common.proto.context_pb2 import ContextId, Device, DeviceId, Empty, Topology, TopologyId
 from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 
+LOGGER = logging.getLogger(__name__)
+
+def get_device(context_client : ContextClient, device_uuid : str, rw_copy : bool = False) -> Optional[Device]:
+    try:
+        # pylint: disable=no-member
+        device_id = DeviceId()
+        device_id.device_uuid.uuid = device_uuid
+        ro_device = context_client.GetDevice(device_id)
+        if not rw_copy: return ro_device
+        rw_device = Device()
+        rw_device.CopyFrom(ro_device)
+        return rw_device
+    except grpc.RpcError:
+        #LOGGER.exception('Unable to get Device({:s})'.format(str(device_uuid)))
+        return None
+
 def get_existing_device_uuids(context_client : ContextClient) -> Set[str]:
     existing_device_ids = context_client.ListDeviceIds(Empty())
     existing_device_uuids = {device_id.device_uuid.uuid for device_id in existing_device_ids.device_ids}
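A brief usage note on the new helper: gRPC response messages should be treated as read-only, so `rw_copy=True` returns a detached copy the caller may mutate. A sketch, with an illustrative device uuid:

```python
# Illustrative only: fetch a device and work on a private, writable copy.
from common.tools.context_queries.Device import get_device
from context.client.ContextClient import ContextClient

context_client = ContextClient()
device = get_device(context_client, 'dev-1', rw_copy=True)
if device is None:
    print('device dev-1 not found')          # the helper swallows grpc.RpcError
else:
    device.device_config.config_rules.add()  # safe: we own this copy
```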
diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py
index 0a202ccd810ed50beca4bb9a7b4441305623f1ed..f2d9aa26d924fed1be58c1c390a63c2d79055827 100644
--- a/src/common/tools/context_queries/InterDomain.py
+++ b/src/common/tools/context_queries/InterDomain.py
@@ -14,7 +14,7 @@
 
 import logging
 from typing import Dict, List, Set, Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
 from common.DeviceTypes import DeviceTypeEnum
 from common.proto.context_pb2 import ContextId, Device, Empty, EndPointId, ServiceTypeEnum, Slice
 from common.proto.pathcomp_pb2 import PathCompRequest
@@ -28,40 +28,40 @@ from pathcomp.frontend.client.PathCompClient import PathCompClient
 
 LOGGER = logging.getLogger(__name__)
 
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 DATACENTER_DEVICE_TYPES = {DeviceTypeEnum.DATACENTER, DeviceTypeEnum.EMULATED_DATACENTER}
 
 def get_local_device_uuids(context_client : ContextClient) -> Set[str]:
     topologies = context_client.ListTopologies(ADMIN_CONTEXT_ID)
     topologies = {topology.topology_id.topology_uuid.uuid : topology for topology in topologies.topologies}
-    LOGGER.info('[get_local_device_uuids] topologies.keys()={:s}'.format(str(topologies.keys())))
+    LOGGER.debug('[get_local_device_uuids] topologies.keys()={:s}'.format(str(topologies.keys())))
 
     local_topology_uuids = set(topologies.keys())
-    local_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID)
-    LOGGER.info('[get_local_device_uuids] local_topology_uuids={:s}'.format(str(local_topology_uuids)))
+    local_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_NAME)
+    LOGGER.debug('[get_local_device_uuids] local_topology_uuids={:s}'.format(str(local_topology_uuids)))
 
     local_device_uuids = set()
 
-    # add topology names except DEFAULT_TOPOLOGY_UUID and INTERDOMAIN_TOPOLOGY_UUID; they are abstracted as a
+    # add topology names except DEFAULT_TOPOLOGY_NAME and INTERDOMAIN_TOPOLOGY_NAME; they are abstracted as a
     # local device in inter-domain and the name of the topology is used as abstract device name
     for local_topology_uuid in local_topology_uuids:
-        if local_topology_uuid == DEFAULT_TOPOLOGY_UUID: continue
+        if local_topology_uuid == DEFAULT_TOPOLOGY_NAME: continue
         local_device_uuids.add(local_topology_uuid)
 
     # add physical devices in the local topologies
     for local_topology_uuid in local_topology_uuids:
         topology_device_ids = topologies[local_topology_uuid].device_ids
         topology_device_uuids = {device_id.device_uuid.uuid for device_id in topology_device_ids}
-        LOGGER.info('[get_local_device_uuids] [loop] local_topology_uuid={:s} topology_device_uuids={:s}'.format(
+        LOGGER.debug('[get_local_device_uuids] [loop] local_topology_uuid={:s} topology_device_uuids={:s}'.format(
             str(local_topology_uuid), str(topology_device_uuids)))
         local_device_uuids.update(topology_device_uuids)
 
-    LOGGER.info('[get_local_device_uuids] local_device_uuids={:s}'.format(str(local_device_uuids)))
+    LOGGER.debug('[get_local_device_uuids] local_device_uuids={:s}'.format(str(local_device_uuids)))
     return local_device_uuids
 
 def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]:
-    context_uuid = DEFAULT_CONTEXT_UUID
-    topology_uuid = INTERDOMAIN_TOPOLOGY_UUID
+    context_uuid = DEFAULT_CONTEXT_NAME
+    topology_uuid = INTERDOMAIN_TOPOLOGY_NAME
     interdomain_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid)
     if interdomain_topology is None:
         MSG = '[get_interdomain_device_uuids] {:s}/{:s} topology not found'
@@ -71,7 +71,7 @@ def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]:
     # add abstracted devices in the interdomain topology
     interdomain_device_ids = interdomain_topology.device_ids
     interdomain_device_uuids = {device_id.device_uuid.uuid for device_id in interdomain_device_ids}
-    LOGGER.info('[get_interdomain_device_uuids] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids)))
+    LOGGER.debug('[get_interdomain_device_uuids] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids)))
     return interdomain_device_uuids
 
 def get_local_domain_devices(context_client : ContextClient) -> List[Device]:
@@ -87,7 +87,7 @@ def get_local_domain_devices(context_client : ContextClient) -> List[Device]:
 
 def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool:
     interdomain_device_uuids = get_interdomain_device_uuids(context_client)
-    LOGGER.info('[is_inter_domain] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids)))
+    LOGGER.debug('[is_inter_domain] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids)))
     non_interdomain_endpoint_ids = [
         endpoint_id
         for endpoint_id in endpoint_ids
@@ -97,14 +97,14 @@ def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPoint
         (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid)
         for endpoint_id in non_interdomain_endpoint_ids
     ]
-    LOGGER.info('[is_inter_domain] non_interdomain_endpoint_ids={:s}'.format(str(str_non_interdomain_endpoint_ids)))
+    LOGGER.debug('[is_inter_domain] non_interdomain_endpoint_ids={:s}'.format(str(str_non_interdomain_endpoint_ids)))
     is_inter_domain_ = len(non_interdomain_endpoint_ids) == 0
-    LOGGER.info('[is_inter_domain] is_inter_domain={:s}'.format(str(is_inter_domain_)))
+    LOGGER.debug('[is_inter_domain] is_inter_domain={:s}'.format(str(is_inter_domain_)))
     return is_inter_domain_
 
 def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool:
     local_device_uuids = get_local_device_uuids(context_client)
-    LOGGER.info('[is_multi_domain] local_device_uuids={:s}'.format(str(local_device_uuids)))
+    LOGGER.debug('[is_multi_domain] local_device_uuids={:s}'.format(str(local_device_uuids)))
     remote_endpoint_ids = [
         endpoint_id
         for endpoint_id in endpoint_ids
@@ -114,9 +114,9 @@ def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPoint
         (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid)
         for endpoint_id in remote_endpoint_ids
     ]
-    LOGGER.info('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids)))
+    LOGGER.debug('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids)))
     is_multi_domain_ = len(remote_endpoint_ids) > 0
-    LOGGER.info('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_)))
+    LOGGER.debug('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_)))
     return is_multi_domain_
 
 def compute_interdomain_path(
@@ -144,9 +144,9 @@ def compute_interdomain_path(
     constraint_lat.custom.constraint_type = 'latency[ms]'
     constraint_lat.custom.constraint_value = '100.0'
 
-    LOGGER.info('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req)))
+    LOGGER.debug('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req)))
     pathcomp_rep = pathcomp_client.Compute(pathcomp_req)
-    LOGGER.info('pathcomp_rep = {:s}'.format(grpc_message_to_json_string(pathcomp_rep)))
+    LOGGER.debug('pathcomp_rep = {:s}'.format(grpc_message_to_json_string(pathcomp_rep)))
 
     service = next(iter([
         service
@@ -186,13 +186,13 @@ def get_device_to_domain_map(context_client : ContextClient) -> Dict[str, str]:
         context_id = context.context_id
         context_uuid = context_id.context_uuid.uuid
         topologies = context_client.ListTopologies(context_id)
-        if context_uuid == DEFAULT_CONTEXT_UUID:
+        if context_uuid == DEFAULT_CONTEXT_NAME:
             for topology in topologies.topologies:
                 topology_id = topology.topology_id
                 topology_uuid = topology_id.topology_uuid.uuid
-                if topology_uuid in {DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID}: continue
+                if topology_uuid in {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME}: continue
 
-                # add topology names except DEFAULT_TOPOLOGY_UUID and INTERDOMAIN_TOPOLOGY_UUID; they are
+                # add topology names except DEFAULT_TOPOLOGY_NAME and INTERDOMAIN_TOPOLOGY_NAME; they are
                 # abstracted as a local device in inter-domain and the name of the topology is used as
                 # abstract device name
                 devices_to_domains[topology_uuid] = topology_uuid
@@ -208,7 +208,7 @@ def get_device_to_domain_map(context_client : ContextClient) -> Dict[str, str]:
                 topology_uuid = topology_id.topology_uuid.uuid
 
                 # if topology is not interdomain
-                if topology_uuid in {INTERDOMAIN_TOPOLOGY_UUID}: continue
+                if topology_uuid in {INTERDOMAIN_TOPOLOGY_NAME}: continue
 
                 # add devices to the remote domain list
                 for device_id in topology.device_ids:
@@ -222,16 +222,16 @@ def compute_traversed_domains(
 ) -> List[Tuple[str, bool, List[EndPointId]]]:
 
     local_device_uuids = get_local_device_uuids(context_client)
-    LOGGER.info('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids)))
+    LOGGER.debug('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids)))
 
-    interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID)
+    interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
     interdomain_devices = {
         device.device_id.device_uuid.uuid : device
         for device in interdomain_devices
     }
 
     devices_to_domains = get_device_to_domain_map(context_client)
-    LOGGER.info('[compute_traversed_domains] devices_to_domains={:s}'.format(str(devices_to_domains)))
+    LOGGER.debug('[compute_traversed_domains] devices_to_domains={:s}'.format(str(devices_to_domains)))
 
     traversed_domains : List[Tuple[str, bool, List[EndPointId]]] = list()
     domains_dict : Dict[str, Tuple[str, bool, List[EndPointId]]] = dict()
@@ -252,5 +252,5 @@ def compute_traversed_domains(
         ])
         for domain_uuid,is_local_domain,endpoint_ids in traversed_domains
     ]
-    LOGGER.info('[compute_traversed_domains] devices_to_domains={:s}'.format(str(str_traversed_domains)))
+    LOGGER.debug('[compute_traversed_domains] traversed_domains={:s}'.format(str(str_traversed_domains)))
     return traversed_domains
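To recap what the two predicates in this file compute: `is_inter_domain` holds when every endpoint maps to a device of the inter-domain topology, while `is_multi_domain` holds when at least one endpoint is not a local device. A sketch with illustrative identifiers:

```python
# Sketch (illustrative ids): classify a request by domain scope.
from common.proto.context_pb2 import EndPointId
from common.tools.context_queries.InterDomain import is_inter_domain, is_multi_domain
from context.client.ContextClient import ContextClient

context_client = ContextClient()

endpoint_id = EndPointId()
endpoint_id.device_id.device_uuid.uuid = 'D1'   # hypothetical device
endpoint_id.endpoint_uuid.uuid = 'eth0'         # hypothetical endpoint

if is_inter_domain(context_client, [endpoint_id]):
    print('all endpoints lie on abstracted remote-domain devices')
elif is_multi_domain(context_client, [endpoint_id]):
    print('at least one endpoint is outside the local domain')
```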
diff --git a/src/common/tools/context_queries/Service.py b/src/common/tools/context_queries/Service.py
index 15b201e731760068457683d9e30f79ab12d231d7..b7ff4117b683f091e501588e8d1386c9c6726ae7 100644
--- a/src/common/tools/context_queries/Service.py
+++ b/src/common/tools/context_queries/Service.py
@@ -14,14 +14,14 @@
 
 import grpc, logging
 from typing import Optional
-from common.Constants import DEFAULT_CONTEXT_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import Service, ServiceId
 from context.client.ContextClient import ContextClient
 
 LOGGER = logging.getLogger(__name__)
 
 def get_service(
-        context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID,
+        context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME,
         rw_copy : bool = False
     ) -> Optional[Service]:
     try:
diff --git a/src/common/tools/context_queries/Slice.py b/src/common/tools/context_queries/Slice.py
index 9f884aa94990c28ad786b3243aed948ddc7f9f34..550b2edaa29d86c92c467f65e28e2c560b94c877 100644
--- a/src/common/tools/context_queries/Slice.py
+++ b/src/common/tools/context_queries/Slice.py
@@ -14,14 +14,14 @@
 
 import grpc, logging
 from typing import Optional
-from common.Constants import DEFAULT_CONTEXT_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import Slice, SliceId
 from context.client.ContextClient import ContextClient
 
 LOGGER = logging.getLogger(__name__)
 
 def get_slice(
-        context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID,
+        context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME,
         rw_copy : bool = False
     ) -> Optional[Slice]:
     try:
diff --git a/src/common/tools/context_queries/Topology.py b/src/common/tools/context_queries/Topology.py
index 3d2077e965efb3e78ad9febbe54b4f0aaea5aef6..619babffda01f0849a80ee082849f7df3275affc 100644
--- a/src/common/tools/context_queries/Topology.py
+++ b/src/common/tools/context_queries/Topology.py
@@ -14,7 +14,7 @@
 
 import grpc, logging
 from typing import List, Optional
-from common.Constants import DEFAULT_CONTEXT_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import ContextId, Topology, TopologyId
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology
@@ -45,7 +45,7 @@ def create_missing_topologies(
         context_client.SetTopology(grpc_topology)
 
 def get_topology(
-        context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID,
+        context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME,
         rw_copy : bool = False
     ) -> Optional[Topology]:
     try:
diff --git a/src/common/tools/object_factory/Context.py b/src/common/tools/object_factory/Context.py
index d5d1bf9439dd12c67d77bcbe38f37fb29c89d948..58f35b9296126f4d0102f36ff34accbbede1331a 100644
--- a/src/common/tools/object_factory/Context.py
+++ b/src/common/tools/object_factory/Context.py
@@ -12,12 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import Optional
+
 def json_context_id(context_uuid : str):
     return {'context_uuid': {'uuid': context_uuid}}
 
-def json_context(context_uuid : str):
-    return {
+def json_context(context_uuid : str, name : Optional[str] = None):
+    result = {
         'context_id'  : json_context_id(context_uuid),
         'topology_ids': [],
         'service_ids' : [],
+        'slice_ids'   : [],
     }
+    if name is not None: result['name'] = name
+    return result
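For reference, the updated factory now emits the `slice_ids` container and an optional `name` (values below are illustrative); `json_topology` further down gains the same optional `name` field:

```python
# Example output of the updated factory (illustrative values).
from common.tools.object_factory.Context import json_context

ctx = json_context('admin', name='Administrative Context')
# -> {'context_id': {'context_uuid': {'uuid': 'admin'}},
#     'topology_ids': [], 'service_ids': [], 'slice_ids': [],
#     'name': 'Administrative Context'}
```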
diff --git a/src/common/tools/object_factory/PolicyRule.py b/src/common/tools/object_factory/PolicyRule.py
index 8702f931dfffef175ce6c25de24a10de8286effc..5094db2eebd1798a6ab17751c14cbf54cf18d06f 100644
--- a/src/common/tools/object_factory/PolicyRule.py
+++ b/src/common/tools/object_factory/PolicyRule.py
@@ -15,20 +15,26 @@
 import logging
 from typing import Dict, List, Optional
 from common.proto.policy_condition_pb2 import BooleanOperator
+from common.proto.policy_pb2 import PolicyRuleStateEnum
 
 LOGGER = logging.getLogger(__name__)
 
-def json_policy_rule_id(policy_rule_uuid : str) -> Dict:
-    return {'uuid': {'uuid': policy_rule_uuid}}
+def json_policyrule_id(policyrule_uuid : str) -> Dict:
+    return {'uuid': {'uuid': policyrule_uuid}}
 
-def json_policy_rule(
-    policy_rule_uuid : str, policy_priority : int = 1,
+def json_policyrule(
+    policyrule_uuid : str, policy_priority : int = 1,
+    policy_state : PolicyRuleStateEnum = PolicyRuleStateEnum.POLICY_UNDEFINED, policy_state_message : str = '',
     boolean_operator : BooleanOperator = BooleanOperator.POLICYRULE_CONDITION_BOOLEAN_AND,
     condition_list : List[Dict] = [], action_list : List[Dict] = [],
     service_id : Optional[Dict] = None, device_id_list : List[Dict] = []
 ) -> Dict:
     basic = {
-        'policyRuleId': json_policy_rule_id(policy_rule_uuid),
+        'policyRuleId': json_policyrule_id(policyrule_uuid),
+        'policyRuleState': {
+            'policyRuleState': policy_state,
+            'policyRuleStateMessage': policy_state_message,
+        },
         'priority': policy_priority,
         'conditionList': condition_list,
         'booleanOperator': boolean_operator,
@@ -37,12 +43,12 @@ def json_policy_rule(
 
     result = {}
     if service_id is not None:
-        policy_rule_type = 'service'
-        result[policy_rule_type] = {'policyRuleBasic': basic}
-        result[policy_rule_type]['serviceId'] = service_id
+        policyrule_type = 'service'
+        result[policyrule_type] = {'policyRuleBasic': basic}
+        result[policyrule_type]['serviceId'] = service_id
     else:
-        policy_rule_type = 'device'
-        result[policy_rule_type] = {'policyRuleBasic': basic}
+        policyrule_type = 'device'
+        result[policyrule_type] = {'policyRuleBasic': basic}
 
-    result[policy_rule_type]['deviceList'] = device_id_list
+    result[policyrule_type]['deviceList'] = device_id_list
     return result
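An illustrative call of the renamed factory; the state value and the shape of `service_id` are examples only (a real id would come from `json_service_id`):

```python
# Sketch: build a service-scoped policy rule dict with an explicit state.
from common.proto.policy_pb2 import PolicyRuleStateEnum
from common.tools.object_factory.PolicyRule import json_policyrule

rule = json_policyrule(
    'rule-1', policy_priority=10,
    policy_state=PolicyRuleStateEnum.POLICY_ACTIVE, policy_state_message='installed',
    service_id={'service_uuid': {'uuid': 'svc-1'}})   # illustrative id dict
# The 'service' variant is selected because service_id is given; otherwise the
# rule is emitted under the 'device' key.
```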
diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py
index 0b5ad820c565c50607180e0933795774fd5c2035..5c0a60776e65acf487696c6e18cb748915aca4b0 100644
--- a/src/common/tools/object_factory/Service.py
+++ b/src/common/tools/object_factory/Service.py
@@ -14,7 +14,7 @@
 
 import copy
 from typing import Dict, List, Optional
-from common.Constants import DEFAULT_CONTEXT_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum
 from common.tools.object_factory.Context import json_context_id
 
@@ -44,7 +44,7 @@ def json_service(
 
 def json_service_l2nm_planned(
         service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
-        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID
+        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME
     ):
 
     return json_service(
@@ -54,7 +54,7 @@ def json_service_l2nm_planned(
 
 def json_service_l3nm_planned(
         service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
-        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID
+        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME
     ):
 
     return json_service(
@@ -64,7 +64,7 @@ def json_service_l3nm_planned(
 
 def json_service_tapi_planned(
         service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
-        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID
+        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME
     ):
 
     return json_service(
@@ -74,7 +74,7 @@ def json_service_tapi_planned(
 
 def json_service_p4_planned(
         service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
-        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID
+        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME
     ):
 
     return json_service(
diff --git a/src/common/tools/object_factory/Slice.py b/src/common/tools/object_factory/Slice.py
index 2376784e3237992ab3d18d9d70db41b3a3f23560..970b12ad9e41882319d6d3dfdbaa052a6ce92909 100644
--- a/src/common/tools/object_factory/Slice.py
+++ b/src/common/tools/object_factory/Slice.py
@@ -14,7 +14,7 @@
 
 import copy
 from typing import Dict, List, Optional
-from common.Constants import DEFAULT_CONTEXT_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import SliceStatusEnum
 from common.tools.object_factory.Context import json_context_id
 
@@ -32,13 +32,15 @@ def json_slice_owner(owner_uuid : str, owner_string : str) -> Dict:
     return {'owner_uuid': {'uuid': owner_uuid}, 'owner_string': owner_string}
 
 def json_slice(
-    slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID,
+    slice_uuid : str, context_id : Optional[Dict] = None,
     status : SliceStatusEnum = SliceStatusEnum.SLICESTATUS_PLANNED, endpoint_ids : List[Dict] = [],
     constraints : List[Dict] = [], config_rules : List[Dict] = [], service_ids : List[Dict] = [],
     subslice_ids : List[Dict] = [], owner : Optional[Dict] = None):
 
+    if context_id is None: context_id = json_context_id(DEFAULT_CONTEXT_NAME)
+
     result = {
-        'slice_id'          : json_slice_id(slice_uuid, context_id=json_context_id(context_uuid)),
+        'slice_id'          : json_slice_id(slice_uuid, context_id=context_id),
         'slice_status'      : {'slice_status': status},
         'slice_endpoint_ids': copy.deepcopy(endpoint_ids),
         'slice_constraints' : copy.deepcopy(constraints),
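Usage-wise, callers now pass a pre-built `context_id` dict instead of a bare uuid; omitting it falls back to the default context. A sketch:

```python
# Illustrative: place a slice in an explicit context.
from common.tools.object_factory.Context import json_context_id
from common.tools.object_factory.Slice import json_slice

slice_desc = json_slice('slice-1', context_id=json_context_id('admin'))
# json_slice('slice-1') would use json_context_id(DEFAULT_CONTEXT_NAME) instead.
```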
diff --git a/src/common/tools/object_factory/Topology.py b/src/common/tools/object_factory/Topology.py
index 7de4a1d577f1e46cfdf6545dde79b60808cd8afb..5f7a42d7a7382c3241996d5fdc94ebea93f6bae8 100644
--- a/src/common/tools/object_factory/Topology.py
+++ b/src/common/tools/object_factory/Topology.py
@@ -20,9 +20,11 @@ def json_topology_id(topology_uuid : str, context_id : Optional[Dict] = None):
     if context_id is not None: result['context_id'] = copy.deepcopy(context_id)
     return result
 
-def json_topology(topology_uuid : str, context_id : Optional[Dict] = None):
-    return {
+def json_topology(topology_uuid : str, name : Optional[str] = None, context_id : Optional[Dict] = None):
+    result = {
         'topology_id': json_topology_id(topology_uuid, context_id=context_id),
         'device_ids' : [],
         'link_ids'   : [],
     }
+    if name is not None: result['name'] = name
+    return result
diff --git a/src/compute/.gitlab-ci.yml b/src/compute/.gitlab-ci.yml
index 52b36e8196822c35503f3e644a0d57691fc5f5f0..d8614cd1c3841f96a3da18d033d0bf1891d83c86 100644
--- a/src/compute/.gitlab-ci.yml
+++ b/src/compute/.gitlab-ci.yml
@@ -39,7 +39,7 @@ build compute:
       - .gitlab-ci.yml
 
 # Apply unit test to the component
-unit test compute:
+unit_test compute:
   variables:
     IMAGE_NAME: 'compute' # name of the microservice
     IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
@@ -79,28 +79,28 @@ unit test compute:
       reports:
         junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
 
-# Deployment of the service in Kubernetes Cluster
-deploy compute:
-  variables:
-    IMAGE_NAME: 'compute' # name of the microservice
-    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
-  stage: deploy
-  needs:
-    - unit test compute
-    # - integ_test execute
-  script:
-    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
-    - kubectl version
-    - kubectl get all
-    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
-    - kubectl get all
-  # environment:
-  #   name: test
-  #   url: https://example.com
-  #   kubernetes:
-  #     namespace: test
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-      when: manual    
-    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
-      when: manual
+## Deployment of the service in Kubernetes Cluster
+#deploy compute:
+#  variables:
+#    IMAGE_NAME: 'compute' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit test compute
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual    
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/compute/service/__main__.py b/src/compute/service/__main__.py
index e80681e177f0f0def3dbe75d76e7e65ceaca1e87..71db89c6536100f0099ffd1a1d49354c6e9fd615 100644
--- a/src/compute/service/__main__.py
+++ b/src/compute/service/__main__.py
@@ -20,6 +20,7 @@ from common.Settings import (
     wait_for_environment_variables)
 from .ComputeService import ComputeService
 from .rest_server.RestServer import RestServer
+from .rest_server.nbi_plugins.debug_api import register_debug_api
 from .rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn
 
 terminate = threading.Event()
@@ -57,6 +58,7 @@ def main():
     grpc_service.start()
 
     rest_server = RestServer()
+    register_debug_api(rest_server)
     register_ietf_l2vpn(rest_server)
     rest_server.start()
 
diff --git a/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py b/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcbc600de4b2e471c1ebd121d26c51ad227ab5f9
--- /dev/null
+++ b/src/compute/service/rest_server/nbi_plugins/debug_api/Resources.py
@@ -0,0 +1,158 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from flask.json import jsonify
+from flask_restful import Resource
+from common.proto.context_pb2 import ConnectionId, ContextId, DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId
+from common.proto.policy_pb2 import PolicyRuleId
+from common.tools.grpc.Tools import grpc_message_to_json
+from common.tools.object_factory.Connection import json_connection_id
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Link import json_link_id
+from common.tools.object_factory.PolicyRule import json_policyrule_id
+from common.tools.object_factory.Service import json_service_id
+from common.tools.object_factory.Slice import json_slice_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+
+
+def format_grpc_to_json(grpc_reply):
+    return jsonify(grpc_message_to_json(grpc_reply))
+
+def grpc_connection_id(connection_uuid):
+    return ConnectionId(**json_connection_id(connection_uuid))
+
+def grpc_context_id(context_uuid):
+    return ContextId(**json_context_id(context_uuid))
+
+def grpc_device_id(device_uuid):
+    return DeviceId(**json_device_id(device_uuid))
+
+def grpc_link_id(link_uuid):
+    return LinkId(**json_link_id(link_uuid))
+
+def grpc_service_id(context_uuid, service_uuid):
+    return ServiceId(**json_service_id(service_uuid, context_id=json_context_id(context_uuid)))
+
+def grpc_slice_id(context_uuid, slice_uuid):
+    return SliceId(**json_slice_id(slice_uuid, context_id=json_context_id(context_uuid)))
+
+def grpc_topology_id(context_uuid, topology_uuid):
+    return TopologyId(**json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)))
+
+def grpc_policy_rule_id(policy_rule_uuid):
+    return PolicyRuleId(**json_policyrule_id(policy_rule_uuid))
+
+
+class _Resource(Resource):
+    def __init__(self) -> None:
+        super().__init__()
+        self.client = ContextClient()
+
+class ContextIds(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.client.ListContextIds(Empty()))
+
+class Contexts(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.client.ListContexts(Empty()))
+
+class Context(_Resource):
+    def get(self, context_uuid : str):
+        return format_grpc_to_json(self.client.GetContext(grpc_context_id(context_uuid)))
+
+class TopologyIds(_Resource):
+    def get(self, context_uuid : str):
+        return format_grpc_to_json(self.client.ListTopologyIds(grpc_context_id(context_uuid)))
+
+class Topologies(_Resource):
+    def get(self, context_uuid : str):
+        return format_grpc_to_json(self.client.ListTopologies(grpc_context_id(context_uuid)))
+
+class Topology(_Resource):
+    def get(self, context_uuid : str, topology_uuid : str):
+        return format_grpc_to_json(self.client.GetTopology(grpc_topology_id(context_uuid, topology_uuid)))
+
+class ServiceIds(_Resource):
+    def get(self, context_uuid : str):
+        return format_grpc_to_json(self.client.ListServiceIds(grpc_context_id(context_uuid)))
+
+class Services(_Resource):
+    def get(self, context_uuid : str):
+        return format_grpc_to_json(self.client.ListServices(grpc_context_id(context_uuid)))
+
+class Service(_Resource):
+    def get(self, context_uuid : str, service_uuid : str):
+        return format_grpc_to_json(self.client.GetService(grpc_service_id(context_uuid, service_uuid)))
+
+class SliceIds(_Resource):
+    def get(self, context_uuid : str):
+        return format_grpc_to_json(self.client.ListSliceIds(grpc_context_id(context_uuid)))
+
+class Slices(_Resource):
+    def get(self, context_uuid : str):
+        return format_grpc_to_json(self.client.ListSlices(grpc_context_id(context_uuid)))
+
+class Slice(_Resource):
+    def get(self, context_uuid : str, slice_uuid : str):
+        return format_grpc_to_json(self.client.GetSlice(grpc_slice_id(context_uuid, slice_uuid)))
+
+class DeviceIds(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.client.ListDeviceIds(Empty()))
+
+class Devices(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.client.ListDevices(Empty()))
+
+class Device(_Resource):
+    def get(self, device_uuid : str):
+        return format_grpc_to_json(self.client.GetDevice(grpc_device_id(device_uuid)))
+
+class LinkIds(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.client.ListLinkIds(Empty()))
+
+class Links(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.client.ListLinks(Empty()))
+
+class Link(_Resource):
+    def get(self, link_uuid : str):
+        return format_grpc_to_json(self.client.GetLink(grpc_link_id(link_uuid)))
+
+class ConnectionIds(_Resource):
+    def get(self, context_uuid : str, service_uuid : str):
+        return format_grpc_to_json(self.client.ListConnectionIds(grpc_service_id(context_uuid, service_uuid)))
+
+class Connections(_Resource):
+    def get(self, context_uuid : str, service_uuid : str):
+        return format_grpc_to_json(self.client.ListConnections(grpc_service_id(context_uuid, service_uuid)))
+
+class Connection(_Resource):
+    def get(self, connection_uuid : str):
+        return format_grpc_to_json(self.client.GetConnection(grpc_connection_id(connection_uuid)))
+
+class PolicyRuleIds(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.client.ListPolicyRuleIds(Empty()))
+
+class PolicyRules(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.client.ListPolicyRules(Empty()))
+
+class PolicyRule(_Resource):
+    def get(self, policy_rule_uuid : str):
+        return format_grpc_to_json(self.client.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid)))
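All resources funnel through `format_grpc_to_json`; outside a Flask request context the underlying conversion step can be checked directly (a sketch, with an illustrative uuid):

```python
# Sketch: the json conversion used by every resource above. jsonify()
# additionally wraps this dict into a Flask response during a request.
from common.tools.grpc.Tools import grpc_message_to_json
from compute.service.rest_server.nbi_plugins.debug_api.Resources import grpc_context_id

msg = grpc_context_id('admin')            # illustrative uuid
print(grpc_message_to_json(msg))          # -> {'context_uuid': {'uuid': 'admin'}}
```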
diff --git a/src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py b/src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fca3b5346dba53d28c2790b3b251f5ec10a24a0
--- /dev/null
+++ b/src/compute/service/rest_server/nbi_plugins/debug_api/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Debug API plugin: exposes read-only REST endpoints over the gRPC Context
+# service; intended for debugging and testing, not as a standardized NBI.
+
+from compute.service.rest_server.RestServer import RestServer
+from .Resources import (
+    Connection, ConnectionIds, Connections, Context, ContextIds, Contexts, Device, DeviceIds, Devices, Link, LinkIds,
+    Links, PolicyRule, PolicyRuleIds, PolicyRules, Service, ServiceIds, Services, Slice, SliceIds, Slices, Topologies,
+    Topology, TopologyIds)
+
+URL_PREFIX = '/api'
+
+# Use 'path' type in Service, Slice, Link, and Connection because their UUIDs might contain the char '/', which
+# Flask is unable to match with the 'string' converter.
+RESOURCES = [
+    # (endpoint_name, resource_class, resource_url)
+    ('api.context_ids',    ContextIds,    '/context_ids'),
+    ('api.contexts',       Contexts,      '/contexts'),
+    ('api.context',        Context,       '/context/<string:context_uuid>'),
+
+    ('api.topology_ids',   TopologyIds,   '/context/<string:context_uuid>/topology_ids'),
+    ('api.topologies',     Topologies,    '/context/<string:context_uuid>/topologies'),
+    ('api.topology',       Topology,      '/context/<string:context_uuid>/topology/<string:topology_uuid>'),
+
+    ('api.service_ids',    ServiceIds,    '/context/<string:context_uuid>/service_ids'),
+    ('api.services',       Services,      '/context/<string:context_uuid>/services'),
+    ('api.service',        Service,       '/context/<string:context_uuid>/service/<path:service_uuid>'),
+
+    ('api.slice_ids',      SliceIds,      '/context/<string:context_uuid>/slice_ids'),
+    ('api.slices',         Slices,        '/context/<string:context_uuid>/slices'),
+    ('api.slice',          Slice,         '/context/<string:context_uuid>/slice/<path:slice_uuid>'),
+
+    ('api.device_ids',     DeviceIds,     '/device_ids'),
+    ('api.devices',        Devices,       '/devices'),
+    ('api.device',         Device,        '/device/<string:device_uuid>'),
+
+    ('api.link_ids',       LinkIds,       '/link_ids'),
+    ('api.links',          Links,         '/links'),
+    ('api.link',           Link,          '/link/<path:link_uuid>'),
+
+    ('api.connection_ids', ConnectionIds, '/context/<string:context_uuid>/service/<path:service_uuid>/connection_ids'),
+    ('api.connections',    Connections,   '/context/<string:context_uuid>/service/<path:service_uuid>/connections'),
+    ('api.connection',     Connection,    '/connection/<path:connection_uuid>'),
+
+    ('api.policyrule_ids', PolicyRuleIds, '/policyrule_ids'),
+    ('api.policyrules',    PolicyRules,   '/policyrules'),
+    ('api.policyrule',     PolicyRule,    '/policyrule/<string:policyrule_uuid>'),
+]
+
+def register_debug_api(rest_server : RestServer):
+    for endpoint_name, resource_class, resource_url in RESOURCES:
+        rest_server.add_resource(resource_class, URL_PREFIX + resource_url, endpoint=endpoint_name)
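Once registered, each entry in `RESOURCES` becomes a GET endpoint under `/api`. An illustrative client-side query (host and port are assumptions; use whatever address the Compute REST server actually binds to):

```python
import requests

BASE_URL = 'http://127.0.0.1:8080/api'  # assumed address of the Compute REST server

# List all contexts, then drill down into the topologies of each one.
contexts = requests.get(BASE_URL + '/contexts').json()
for context in contexts.get('contexts', []):
    context_uuid = context['context_id']['context_uuid']['uuid']
    topologies = requests.get('{:s}/context/{:s}/topologies'.format(BASE_URL, context_uuid)).json()
    print(context_uuid, topologies)
```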
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
index f27d852f017a08cb8b854cc19568280b9de14470..d27e55047567941fb1467dbd2b1745c163dfd6c3 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py
@@ -18,7 +18,7 @@ from flask import request
 from flask.json import jsonify
 from flask_restful import Resource
 from werkzeug.exceptions import UnsupportedMediaType
-from common.Constants import DEFAULT_CONTEXT_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import SliceStatusEnum, Slice
 from slice.client.SliceClient import SliceClient
 from .schemas.vpn_service import SCHEMA_VPN_SERVICE
@@ -45,14 +45,12 @@ class L2VPN_Services(Resource):
             try:
                 # pylint: disable=no-member
                 slice_request = Slice()
-                slice_request.slice_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID
+                slice_request.slice_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
                 slice_request.slice_id.slice_uuid.uuid = vpn_service['vpn-id']
                 slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED
 
                 slice_client = SliceClient()
-                slice_reply = slice_client.CreateSlice(slice_request)
-                if slice_reply != slice_request.slice_id: # pylint: disable=no-member
-                    raise Exception('Slice creation failed. Wrong Slice Id was returned')
+                slice_client.CreateSlice(slice_request)
 
                 response = jsonify({})
                 response.status_code = HTTP_CREATED
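A hedged note on the checks removed above: with context and slice identifiers now passed as names (`DEFAULT_CONTEXT_NAME`) and presumably resolved to UUIDs server-side, the `SliceId` returned by `CreateSlice`/`UpdateSlice` can legitimately differ from the one sent, so comparing them is no longer a valid success test; failures are assumed to surface as gRPC exceptions instead. A sketch under that assumption:

```python
import logging
from slice.client.SliceClient import SliceClient

LOGGER = logging.getLogger(__name__)

def create_slice(slice_request):
    # Rely on gRPC exceptions, not on echoed IDs, to detect failures.
    try:
        SliceClient().CreateSlice(slice_request)  # raises on failure
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception('Slice creation failed')
        raise
```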
diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index 819d8995da6ffc3a7913c8781e4021ce83665e29..0b8305ed78e3fcf52cde733b684a5bd7eb6c4d81 100644
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -129,9 +129,7 @@ def process_list_site_network_access(
         sna_request = process_site_network_access(context_client, site_id, site_network_access)
         LOGGER.debug('sna_request = {:s}'.format(grpc_message_to_json_string(sna_request)))
         try:
-            sna_reply = slice_client.UpdateSlice(sna_request)
-            if sna_reply != sna_request.slice_id: # pylint: disable=no-member
-                raise Exception('Slice update failed. Wrong Slice Id was returned')
+            slice_client.UpdateSlice(sna_request)
         except Exception as e: # pylint: disable=broad-except
             msg = 'Something went wrong Updating VPN {:s}'
             LOGGER.exception(msg.format(grpc_message_to_json_string(sna_request)))
diff --git a/src/compute/tests/MockService_Dependencies.py b/src/compute/tests/MockService_Dependencies.py
index 5ed9d4da9fa23ff43bbd72eb021ae4e4fecd9b9b..fbc4bd1a4956926151909d535e18bb244cdea97e 100644
--- a/src/compute/tests/MockService_Dependencies.py
+++ b/src/compute/tests/MockService_Dependencies.py
@@ -28,7 +28,7 @@ LOCAL_HOST = '127.0.0.1'
 
 SERVICE_CONTEXT = ServiceNameEnum.CONTEXT
 SERVICE_SERVICE = ServiceNameEnum.SERVICE
-SERVICE_SLICE = ServiceNameEnum.SLICE
+SERVICE_SLICE   = ServiceNameEnum.SLICE
 
 class MockService_Dependencies(GenericGrpcService):
     # Mock Service implementing Context, Service and Slice to simplify unitary tests of Compute
@@ -54,5 +54,5 @@ class MockService_Dependencies(GenericGrpcService):
         os.environ[get_env_var_name(SERVICE_SERVICE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
         os.environ[get_env_var_name(SERVICE_SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
 
-        os.environ[get_env_var_name(SERVICE_SLICE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
-        os.environ[get_env_var_name(SERVICE_SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
+        os.environ[get_env_var_name(SERVICE_SLICE,   ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
+        os.environ[get_env_var_name(SERVICE_SLICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
diff --git a/src/compute/tests/PrepareTestScenario.py b/src/compute/tests/PrepareTestScenario.py
index 06fb34f9ee7508f4bd6fa769da78c50eb78c3bb8..7ef99f4b1817247b4645ce6ff25b260f51706f54 100644
--- a/src/compute/tests/PrepareTestScenario.py
+++ b/src/compute/tests/PrepareTestScenario.py
@@ -17,6 +17,7 @@ from common.Constants import ServiceNameEnum
 from common.Settings import (
     ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name, get_service_port_http)
 from compute.service.rest_server.RestServer import RestServer
+from compute.service.rest_server.nbi_plugins.debug_api import register_debug_api
 from compute.service.rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn
 from compute.tests.MockService_Dependencies import MockService_Dependencies
 from tests.tools.mock_osm.MockOSM import MockOSM
@@ -39,6 +40,7 @@ def mock_service():
 @pytest.fixture(scope='session')
 def compute_service_rest(mock_service):  # pylint: disable=redefined-outer-name
     _rest_server = RestServer()
+    register_debug_api(_rest_server)
     register_ietf_l2vpn(_rest_server)
     _rest_server.start()
     time.sleep(1) # bring time for the server to start
diff --git a/src/compute/tests/test_debug_api.py b/src/compute/tests/test_debug_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..6265c37511a5fd6ffb1647b2f76482e556c3b287
--- /dev/null
+++ b/src/compute/tests/test_debug_api.py
@@ -0,0 +1,228 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os, pytest, requests, time, urllib
+from typing import Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, ServiceNameEnum
+from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Slice, Topology
+from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name,
+    get_service_baseurl_http, get_service_port_grpc, get_service_port_http)
+from common.type_checkers.Assertions import (
+    validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids,
+    validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids,
+    validate_links, validate_service, validate_service_ids, validate_services, validate_topologies, validate_topology,
+    validate_topology_ids)
+from compute.service.rest_server.RestServer import RestServer
+from compute.service.rest_server.nbi_plugins.debug_api import register_debug_api
+from context.client.ContextClient import ContextClient
+from .MockService_Dependencies import MockService_Dependencies
+from .Objects import (
+    CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID,
+    DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2,
+    LINK_R1_R2_ID, LINK_R1_R2_UUID, LINK_R1_R3, LINK_R2_R3, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID,
+    SERVICE_R1_R3, SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID,
+    SLICE_R1_R3, SLICE_R1_R3_UUID, TOPOLOGY, TOPOLOGY_ID, POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID)
+
+
+@pytest.fixture(scope='session')
+def mock_service():
+    _service = MockService_Dependencies(MOCKSERVICE_PORT)
+    _service.configure_env_vars()
+    _service.start()
+    yield _service
+    _service.stop()
+
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+LOCAL_HOST = '127.0.0.1'
+GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT))   # avoid privileged ports
+HTTP_PORT = 10000 + int(get_service_port_http(ServiceNameEnum.CONTEXT))   # avoid privileged ports
+
+MOCKSERVICE_PORT = 10000
+DEVICE_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.DEVICE) # avoid privileged ports
+
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT)
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT)
+
+@pytest.fixture(scope='session')
+def context_service_rest(mock_service): # pylint: disable=redefined-outer-name
+    # Serve the Debug API on top of the mocked Context service
+    # (same pattern as in PrepareTestScenario.py).
+    _rest_server = RestServer()
+    register_debug_api(_rest_server)
+    _rest_server.start()
+    time.sleep(1) # bring time for the server to start
+    yield _rest_server
+    _rest_server.shutdown()
+    _rest_server.join()
+
+@pytest.fixture(scope='session')
+def context_client_grpc(mock_service): # pylint: disable=redefined-outer-name
+    _client = ContextClient()
+    yield _client
+    _client.close()
+
+def test_populate_database(mock_service): # pylint: disable=redefined-outer-name
+    client = ContextClient(host=LOCAL_HOST, port=GRPC_PORT)
+    client.SetContext(Context(**CONTEXT))
+    client.SetTopology(Topology(**TOPOLOGY))
+    client.SetDevice(Device(**DEVICE_R1))
+    client.SetDevice(Device(**DEVICE_R2))
+    client.SetDevice(Device(**DEVICE_R3))
+    client.SetLink(Link(**LINK_R1_R2))
+    client.SetLink(Link(**LINK_R1_R3))
+    client.SetLink(Link(**LINK_R2_R3))
+    client.SetService(Service(**SERVICE_R1_R2))
+    client.SetService(Service(**SERVICE_R1_R3))
+    client.SetService(Service(**SERVICE_R2_R3))
+    client.SetSlice(Slice(**SLICE_R1_R3))
+    client.SetConnection(Connection(**CONNECTION_R1_R3))
+
+def do_rest_request(url : str):
+    base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
+    request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
+    LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
+    reply = requests.get(request_url)
+    LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
+    assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
+    return reply.json()
+
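+# Example with illustrative values: if HTTP_PORT were 18080 and the base URL '/api',
+# do_rest_request('/context_ids') would GET http://127.0.0.1:18080/api/context_ids,
+# assert a 200 status code, and return the decoded JSON body.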
+
+def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/context_ids')
+    validate_context_ids(reply)
+
+def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/contexts')
+    validate_contexts(reply)
+
+def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_request('/context/{:s}'.format(context_uuid))
+    validate_context(reply)
+
+def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid))
+    validate_topology_ids(reply)
+
+def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid))
+    validate_topologies(reply)
+
+def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_NAME)
+    reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid))
+    validate_topology(reply, num_devices=3, num_links=3)
+
+def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid))
+    validate_service_ids(reply)
+
+def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_request('/context/{:s}/services'.format(context_uuid))
+    validate_services(reply)
+
+def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='')
+    reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid))
+    validate_service(reply)
+
+def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid))
+    #validate_slice_ids(reply)
+
+def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    reply = do_rest_request('/context/{:s}/slices'.format(context_uuid))
+    #validate_slices(reply)
+
+def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    slice_uuid = urllib.parse.quote(SLICE_R1_R3_UUID, safe='')
+    reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid))
+    #validate_slice(reply)
+
+def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/device_ids')
+    validate_device_ids(reply)
+
+def test_rest_get_devices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/devices')
+    validate_devices(reply)
+
+def test_rest_get_device(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    device_uuid = urllib.parse.quote(DEVICE_R1_UUID, safe='')
+    reply = do_rest_request('/device/{:s}'.format(device_uuid))
+    validate_device(reply)
+
+def test_rest_get_link_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/link_ids')
+    validate_link_ids(reply)
+
+def test_rest_get_links(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/links')
+    validate_links(reply)
+
+def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    link_uuid = urllib.parse.quote(LINK_R1_R2_UUID, safe='')
+    reply = do_rest_request('/link/{:s}'.format(link_uuid))
+    validate_link(reply)
+
+def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='')
+    reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid))
+    validate_connection_ids(reply)
+
+def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_NAME)
+    service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='')
+    reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid))
+    validate_connections(reply)
+
+def test_rest_get_connection(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    connection_uuid = urllib.parse.quote(CONNECTION_R1_R3_UUID, safe='')
+    reply = do_rest_request('/connection/{:s}'.format(connection_uuid))
+    validate_connection(reply)
+
+def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/policyrule_ids')
+    #validate_policyrule_ids(reply)
+
+def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/policyrules')
+    #validate_policyrules(reply)
+
+def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    policyrule_uuid = urllib.parse.quote(POLICY_RULE_UUID, safe='')
+    reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid))
+    #validate_policyrule(reply)
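One detail worth noting in these tests: `urllib.parse.quote(..., safe='')` also percent-encodes `/`, which `quote` leaves untouched by default; without it, a UUID containing `/` would be split across path segments instead of reaching the `path`-typed converters defined in the Debug API:

```python
import urllib.parse

print(urllib.parse.quote('svc/uuid'))           # 'svc/uuid'   -> '/' kept, breaks routing
print(urllib.parse.quote('svc/uuid', safe=''))  # 'svc%2Fuuid' -> single path segment
```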
diff --git a/src/context/.gitlab-ci.yml b/src/context/.gitlab-ci.yml
index 0da2b582edf879f793341887adef9cef5ad4fff2..044600bc5b4d3d55d66c85fc2e5bed150630a094 100644
--- a/src/context/.gitlab-ci.yml
+++ b/src/context/.gitlab-ci.yml
@@ -39,7 +39,7 @@ build context:
       - .gitlab-ci.yml
 
 # Apply unit test to the component
-unit test context:
+unit_test context:
   variables:
     IMAGE_NAME: 'context' # name of the microservice
     IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
@@ -49,23 +49,55 @@ unit test context:
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
     - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
-    - if docker container ls | grep redis; then docker rm -f redis; else echo "redis image is not in the system"; fi
-    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
+    - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
+    - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
+    - if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi
+    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
   script:
     - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
-    - docker pull "redis:6.2"
-    - docker run --name redis -d --network=teraflowbridge redis:6.2
-    - sleep 10
-    - docker run --name $IMAGE_NAME -d -p 1010:1010 --env "DB_BACKEND=redis" --env "REDIS_SERVICE_HOST=redis" --env "REDIS_SERVICE_PORT=6379" --env "REDIS_DATABASE_ID=0" -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - docker pull "cockroachdb/cockroach:latest-v22.2"
+    - docker pull "nats:2.9"
+    - docker volume create crdb
+    - >
+      docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080
+      --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123
+      --volume "crdb:/cockroach/cockroach-data"
+      cockroachdb/cockroach:latest-v22.2 start-single-node
+    - >
+      docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222
+      nats:2.9 --http_port 8222 --user tfs --pass tfs123
+    - echo "Waiting for initialization..."
+    - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
+    - docker logs crdb
+    - while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
+    - docker logs nats
     - docker ps -a
+    - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $CRDB_ADDRESS
+    - NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $NATS_ADDRESS
+    - >
+      docker run --name $IMAGE_NAME -d -p 1010:1010
+      --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
+      --env "MB_BACKEND=nats"
+      --env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222"
+      --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - docker ps -a
+    - sleep 5
     - docker logs $IMAGE_NAME
-    - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
+    - >
+      docker exec -i $IMAGE_NAME bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py"
     - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
-    - docker rm -f $IMAGE_NAME
-    - docker rm -f  redis
+    - docker rm -f $IMAGE_NAME crdb nats
+    - docker volume rm -f crdb
     - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
     - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
@@ -83,28 +115,28 @@ unit test context:
       reports:
         junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
 
-# Deployment of the service in Kubernetes Cluster
-deploy context:
-  variables:
-    IMAGE_NAME: 'context' # name of the microservice
-    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
-  stage: deploy
-  needs:
-    - unit test context
-    # - integ_test execute
-  script:
-    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
-    - kubectl version
-    - kubectl get all
-    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
-    - kubectl get all
-  # environment:
-  #   name: test
-  #   url: https://example.com
-  #   kubernetes:
-  #     namespace: test
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-      when: manual    
-    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
-      when: manual
+## Deployment of the service in Kubernetes Cluster
+#deploy context:
+#  variables:
+#    IMAGE_NAME: 'context' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit test context
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual    
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
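For reference, the `CRDB_URI` injected above is a standard SQLAlchemy URL handled by the `sqlalchemy-cockroachdb` dialect pinned in `requirements.in`. A minimal sketch of how a component might consume it (the actual logic lives in `context.service.database.Engine`, not shown in this excerpt):

```python
import os
import sqlalchemy

# e.g. cockroachdb://tfs:tfs123@crdb:26257/tfs_test?sslmode=require
crdb_uri = os.environ['CRDB_URI']
engine = sqlalchemy.create_engine(crdb_uri)  # requires the sqlalchemy-cockroachdb dialect

with engine.connect() as connection:
    print(connection.execute(sqlalchemy.text('SELECT version()')).scalar_one())
```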
diff --git a/src/context/Config.py b/src/context/Config.py
index 6f5d1dc0b347dc5db27a2cfae973a4e5bdf7b4cc..70a33251242c51f49140e596b8208a19dd5245f7 100644
--- a/src/context/Config.py
+++ b/src/context/Config.py
@@ -12,5 +12,3 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Autopopulate the component with fake data for testing purposes?
-POPULATE_FAKE_DATA = False
diff --git a/src/context/client/EventsCollector.py b/src/context/client/EventsCollector.py
index f5fc3fbc735c2f62b39223b9ed20aa3730ecd11d..9ad6e101b5130d6bbb1e6b33ba926dc4c0c128b0 100644
--- a/src/context/client/EventsCollector.py
+++ b/src/context/client/EventsCollector.py
@@ -132,7 +132,7 @@ class EventsCollector:
                 if event is None: break
                 events.append(event)
         else:
-            for _ in range(count):
+            while len(events) < count:
                 if self._terminate.is_set(): break
                 event = self.get_event(block=block, timeout=timeout)
                 if event is None: continue
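The `for _ in range(count)` to `while len(events) < count` change matters because `get_event()` can return `None` on a timeout: the old loop consumed one of its `count` iterations on every `None`, so it could return fewer events than requested, while the new loop keeps polling until `count` real events arrive (or termination is signalled). A self-contained illustration:

```python
# Simulate a source that intersperses timeouts (None) with real events.
events, count = [], 3
source = iter([None, 'ev-1', None, 'ev-2', None, None, 'ev-3'])

while len(events) < count:
    event = next(source)
    if event is None: continue  # a timeout no longer "uses up" an iteration
    events.append(event)

assert events == ['ev-1', 'ev-2', 'ev-3']
```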
diff --git a/src/context/data/cleanup_commands.sql b/src/context/data/cleanup_commands.sql
new file mode 100644
index 0000000000000000000000000000000000000000..00a522d850d79078881031e79199ace59e389a3a
--- /dev/null
+++ b/src/context/data/cleanup_commands.sql
@@ -0,0 +1,12 @@
+USE tfs;
+
+DELETE FROM policyrule WHERE 1=1;
+DELETE FROM slice      WHERE 1=1;
+DELETE FROM connection WHERE 1=1;
+DELETE FROM service    WHERE 1=1;
+
+DELETE FROM link       WHERE 1=1;
+DELETE FROM endpoint   WHERE 1=1;
+DELETE FROM device     WHERE 1=1;
+DELETE FROM topology   WHERE 1=1;
+DELETE FROM context    WHERE 1=1;
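The `DELETE` statements run child-to-parent (policy rules, slices, connections, and services before links, endpoints, devices, topologies, and contexts) so foreign-key constraints are never violated. A convenience sketch for applying the script through SQLAlchemy (the file path and connection URI are assumptions):

```python
import sqlalchemy

engine = sqlalchemy.create_engine('cockroachdb://tfs:tfs123@127.0.0.1:26257/tfs_test?sslmode=require')

with engine.begin() as connection:  # single transaction; rolls back if any DELETE fails
    with open('src/context/data/cleanup_commands.sql', encoding='utf-8') as sql_file:
        for statement in sql_file.read().split(';'):  # naive split; fine for this simple script
            if statement.strip():
                connection.execute(sqlalchemy.text(statement))
```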
diff --git a/src/context/requirements.in b/src/context/requirements.in
index 9cc7e71f2428fbb42693f47c911340e5f3f2dbc1..e4bb209c7f01e6173e764a870d24900622b2f748 100644
--- a/src/context/requirements.in
+++ b/src/context/requirements.in
@@ -1,4 +1,5 @@
-Flask==2.1.3
-Flask-RESTful==0.3.9
-redis==4.1.2
-requests==2.27.1
+nats-py==2.2.0
+psycopg2-binary==2.9.3
+SQLAlchemy==1.4.40
+sqlalchemy-cockroachdb==1.4.3
+SQLAlchemy-Utils==0.38.3
diff --git a/src/context/service/grpc_server/ContextService.py b/src/context/service/ContextService.py
similarity index 84%
rename from src/context/service/grpc_server/ContextService.py
rename to src/context/service/ContextService.py
index 5d4dd8bb991ed64a970f9815bb302fd33d51cf34..c4881ccf59ec3fae216bd536eed6a338503adaf9 100644
--- a/src/context/service/grpc_server/ContextService.py
+++ b/src/context/service/ContextService.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging, sqlalchemy
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
 from common.message_broker.MessageBroker import MessageBroker
-from common.orm.Database import Database
 from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
 from common.proto.context_policy_pb2_grpc import add_ContextPolicyServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
@@ -23,12 +23,15 @@ from .ContextServiceServicerImpl import ContextServiceServicerImpl
 
 # Custom gRPC settings
 GRPC_MAX_WORKERS = 200 # multiple clients might keep connections alive for Get*Events() RPC methods
+LOGGER = logging.getLogger(__name__)
 
 class ContextService(GenericGrpcService):
-    def __init__(self, database : Database, messagebroker : MessageBroker, cls_name: str = __name__) -> None:
+    def __init__(
+        self, db_engine : sqlalchemy.engine.Engine, messagebroker : MessageBroker, cls_name: str = __name__
+    ) -> None:
         port = get_service_port_grpc(ServiceNameEnum.CONTEXT)
         super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name)
-        self.context_servicer = ContextServiceServicerImpl(database, messagebroker)
+        self.context_servicer = ContextServiceServicerImpl(db_engine, messagebroker)
 
     def install_servicers(self):
         add_ContextServiceServicer_to_server(self.context_servicer, self.server)
diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..82e28a7f13135735909a99b635cf26bb3e02e252
--- /dev/null
+++ b/src/context/service/ContextServiceServicerImpl.py
@@ -0,0 +1,339 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, json, logging, sqlalchemy
+from typing import Iterator
+from common.message_broker.MessageBroker import MessageBroker
+from common.proto.context_pb2 import (
+    Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
+    Context, ContextEvent, ContextId, ContextIdList, ContextList,
+    Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
+    Empty, EventTypeEnum,
+    Link, LinkEvent, LinkId, LinkIdList, LinkList,
+    Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
+    Slice, SliceEvent, SliceId, SliceIdList, SliceList,
+    Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
+from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule
+from common.proto.context_pb2_grpc import ContextServiceServicer
+from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from .database.Connection import (
+    connection_delete, connection_get, connection_list_ids, connection_list_objs, connection_set)
+from .database.Context import context_delete, context_get, context_list_ids, context_list_objs, context_set
+from .database.Device import device_delete, device_get, device_list_ids, device_list_objs, device_set
+from .database.Link import link_delete, link_get, link_list_ids, link_list_objs, link_set
+from .database.PolicyRule import (
+    policyrule_delete, policyrule_get, policyrule_list_ids, policyrule_list_objs, policyrule_set)
+from .database.Service import service_delete, service_get, service_list_ids, service_list_objs, service_set
+from .database.Slice import slice_delete, slice_get, slice_list_ids, slice_list_objs, slice_set, slice_unset
+from .database.Topology import topology_delete, topology_get, topology_list_ids, topology_list_objs, topology_set
+from .Events import (
+    CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE,
+    TOPIC_SLICE, TOPIC_TOPOLOGY, notify_event)
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('Context', 'RPC')
+
+class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer):
+    def __init__(self, db_engine : sqlalchemy.engine.Engine, messagebroker : MessageBroker) -> None:
+        LOGGER.debug('Creating Servicer...')
+        self.db_engine = db_engine
+        self.messagebroker = messagebroker
+        LOGGER.debug('Servicer Created')
+
+    def _get_metrics(self) -> MetricsPool: return METRICS_POOL
+
+
+    # ----- Context ----------------------------------------------------------------------------------------------------
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListContextIds(self, request : Empty, context : grpc.ServicerContext) -> ContextIdList:
+        return ContextIdList(context_ids=context_list_ids(self.db_engine))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListContexts(self, request : Empty, context : grpc.ServicerContext) -> ContextList:
+        return ContextList(contexts=context_list_objs(self.db_engine))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetContext(self, request : ContextId, context : grpc.ServicerContext) -> Context:
+        return Context(**context_get(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetContext(self, request : Context, context : grpc.ServicerContext) -> ContextId:
+        context_id,updated = context_set(self.db_engine, request)
+        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+        notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': context_id})
+        return ContextId(**context_id)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def RemoveContext(self, request : ContextId, context : grpc.ServicerContext) -> Empty:
+        context_id,deleted = context_delete(self.db_engine, request)
+        if deleted:
+            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+            notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': context_id})
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetContextEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]:
+        for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT):
+            yield ContextEvent(**json.loads(message.content))
+
+
+    # ----- Topology ---------------------------------------------------------------------------------------------------
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListTopologyIds(self, request : ContextId, context : grpc.ServicerContext) -> TopologyIdList:
+        return TopologyIdList(topology_ids=topology_list_ids(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListTopologies(self, request : ContextId, context : grpc.ServicerContext) -> TopologyList:
+        return TopologyList(topologies=topology_list_objs(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Topology:
+        return Topology(**topology_get(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetTopology(self, request : Topology, context : grpc.ServicerContext) -> TopologyId:
+        topology_id,updated = topology_set(self.db_engine, request)
+        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+        notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': topology_id})
+        return TopologyId(**topology_id)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def RemoveTopology(self, request : TopologyId, context : grpc.ServicerContext) -> Empty:
+        topology_id,deleted = topology_delete(self.db_engine, request)
+        if deleted:
+            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+            notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': topology_id})
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetTopologyEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]:
+        for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT):
+            yield TopologyEvent(**json.loads(message.content))
+
+
+    # ----- Device -----------------------------------------------------------------------------------------------------
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListDeviceIds(self, request : Empty, context : grpc.ServicerContext) -> DeviceIdList:
+        return DeviceIdList(device_ids=device_list_ids(self.db_engine))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListDevices(self, request : Empty, context : grpc.ServicerContext) -> DeviceList:
+        return DeviceList(devices=device_list_objs(self.db_engine))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Device:
+        return Device(**device_get(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId:
+        device_id,updated = device_set(self.db_engine, request)
+        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+        notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id})
+        return DeviceId(**device_id)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def RemoveDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty:
+        device_id,deleted = device_delete(self.db_engine, request)
+        if deleted:
+            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+            notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': device_id})
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetDeviceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
+        for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT):
+            yield DeviceEvent(**json.loads(message.content))
+
+
+    # ----- Link -------------------------------------------------------------------------------------------------------
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListLinkIds(self, request : Empty, context : grpc.ServicerContext) -> LinkIdList:
+        return LinkIdList(link_ids=link_list_ids(self.db_engine))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList:
+        return LinkList(links=link_list_objs(self.db_engine))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetLink(self, request : LinkId, context : grpc.ServicerContext) -> Link:
+        return Link(**link_get(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetLink(self, request : Link, context : grpc.ServicerContext) -> LinkId:
+        link_id,updated = link_set(self.db_engine, request)
+        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+        notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id})
+        return LinkId(**link_id)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def RemoveLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty:
+        link_id,deleted = link_delete(self.db_engine, request)
+        if deleted:
+            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+            notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': link_id})
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetLinkEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]:
+        for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT):
+            yield LinkEvent(**json.loads(message.content))
+
+
+    # ----- Service ----------------------------------------------------------------------------------------------------
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListServiceIds(self, request : ContextId, context : grpc.ServicerContext) -> ServiceIdList:
+        return ServiceIdList(service_ids=service_list_ids(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListServices(self, request : ContextId, context : grpc.ServicerContext) -> ServiceList:
+        return ServiceList(services=service_list_objs(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetService(self, request : ServiceId, context : grpc.ServicerContext) -> Service:
+        return Service(**service_get(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetService(self, request : Service, context : grpc.ServicerContext) -> ServiceId:
+        service_id,updated = service_set(self.db_engine, request)
+        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+        notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id})
+        return ServiceId(**service_id)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def RemoveService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty:
+        service_id,deleted = service_delete(self.db_engine, request)
+        if deleted:
+            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+            notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': service_id})
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetServiceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
+        for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT):
+            yield ServiceEvent(**json.loads(message.content))
+
+
+    # ----- Slice ----------------------------------------------------------------------------------------------------
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListSliceIds(self, request : ContextId, context : grpc.ServicerContext) -> SliceIdList:
+        return SliceIdList(slice_ids=slice_list_ids(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListSlices(self, request : ContextId, context : grpc.ServicerContext) -> SliceList:
+        return SliceList(slices=slice_list_objs(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetSlice(self, request : SliceId, context : grpc.ServicerContext) -> Slice:
+        return Slice(**slice_get(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
+        slice_id,updated = slice_set(self.db_engine, request)
+        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+        notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id})
+        return SliceId(**slice_id)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def UnsetSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
+        slice_id,updated = slice_unset(self.db_engine, request)
+        if updated:
+            event_type = EventTypeEnum.EVENTTYPE_UPDATE
+            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id})
+        return SliceId(**slice_id)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def RemoveSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty:
+        slice_id,deleted = slice_delete(self.db_engine, request)
+        if deleted:
+            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': slice_id})
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetSliceEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]:
+        for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT):
+            yield SliceEvent(**json.loads(message.content))
+
+
+    # ----- Connection -------------------------------------------------------------------------------------------------
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListConnectionIds(self, request : ServiceId, context : grpc.ServicerContext) -> ConnectionIdList:
+        return ConnectionIdList(connection_ids=connection_list_ids(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListConnections(self, request : ServiceId, context : grpc.ServicerContext) -> ConnectionList:
+        return ConnectionList(connections=connection_list_objs(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Connection:
+        return Connection(**connection_get(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetConnection(self, request : Connection, context : grpc.ServicerContext) -> ConnectionId:
+        connection_id,updated = connection_set(self.db_engine, request)
+        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+        notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': connection_id})
+        return ConnectionId(**connection_id)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def RemoveConnection(self, request : ConnectionId, context : grpc.ServicerContext) -> Empty:
+        connection_id,deleted = connection_delete(self.db_engine, request)
+        if deleted:
+            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+            notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': connection_id})
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetConnectionEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]:
+        for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT):
+            yield ConnectionEvent(**json.loads(message.content))
+
+
+    # ----- Policy -----------------------------------------------------------------------------------------------------
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListPolicyRuleIds(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleIdList:
+        return PolicyRuleIdList(policyRuleIdList=policyrule_list_ids(self.db_engine))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListPolicyRules(self, request : Empty, context: grpc.ServicerContext) -> PolicyRuleList:
+        return PolicyRuleList(policyRules=policyrule_list_objs(self.db_engine))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetPolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule:
+        return PolicyRule(**policyrule_get(self.db_engine, request))
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetPolicyRule(self, request : PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId:
+        policyrule_id,updated = policyrule_set(self.db_engine, request)
+        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+        notify_event(self.messagebroker, TOPIC_POLICY, event_type, {'policyrule_id': policyrule_id})
+        return PolicyRuleId(**policyrule_id)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def RemovePolicyRule(self, request : PolicyRuleId, context: grpc.ServicerContext) -> Empty:
+        policyrule_id,deleted = policyrule_delete(self.db_engine, request)
+        if deleted:
+            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+            notify_event(self.messagebroker, TOPIC_POLICY, event_type, {'policyrule_id': policyrule_id})
+        return Empty()
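Seen from a client, every `Set*` RPC above follows the same contract: it returns the (possibly regenerated) entity ID and publishes a CREATE or UPDATE event on the corresponding topic. An illustrative round-trip (the payload is hypothetical; `EventsCollector` is the client-side helper touched elsewhere in this diff):

```python
from common.proto.context_pb2 import Context
from context.client.ContextClient import ContextClient
from context.client.EventsCollector import EventsCollector

CONTEXT = {'context_id': {'context_uuid': {'uuid': 'admin'}}}  # hypothetical payload

client = ContextClient()
collector = EventsCollector(client)
collector.start()

context_id = client.SetContext(Context(**CONTEXT))    # first call -> EVENTTYPE_CREATE
event = collector.get_event(block=True, timeout=1.0)  # the ContextEvent published above

collector.stop()
client.close()
```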
diff --git a/src/context/service/database/Events.py b/src/context/service/Events.py
similarity index 72%
rename from src/context/service/database/Events.py
rename to src/context/service/Events.py
index 46b1d36c4a40a48cf2928f1e1c80822443e3cf28..77401314bfc7f6682a2d1515cbfce26ebf123332 100644
--- a/src/context/service/database/Events.py
+++ b/src/context/service/Events.py
@@ -18,9 +18,24 @@ from common.message_broker.Message import Message
 from common.message_broker.MessageBroker import MessageBroker
 from common.proto.context_pb2 import EventTypeEnum
 
-def notify_event(
-    messagebroker : MessageBroker, topic_name : str, event_type : EventTypeEnum, fields : Dict[str, str]) -> None:
+TOPIC_CONNECTION = 'connection'
+TOPIC_CONTEXT    = 'context'
+TOPIC_DEVICE     = 'device'
+TOPIC_LINK       = 'link'
+TOPIC_POLICY     = 'policy'
+TOPIC_SERVICE    = 'service'
+TOPIC_SLICE      = 'slice'
+TOPIC_TOPOLOGY   = 'topology'
+
+TOPICS = {
+    TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_POLICY, TOPIC_SERVICE, TOPIC_SLICE, TOPIC_TOPOLOGY
+}
 
+CONSUME_TIMEOUT = 0.5 # seconds
+
+def notify_event(
+    messagebroker : MessageBroker, topic_name : str, event_type : EventTypeEnum, fields : Dict[str, str]
+) -> None:
     event = {'event': {'timestamp': {'timestamp': time.time()}, 'event_type': event_type}}
     for field_name, field_value in fields.items():
         event[field_name] = field_value
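Putting the pieces together, the message published for, e.g., a first `SetContext` is the `event` skeleton above merged with the caller-supplied fields, serialized as JSON (values illustrative):

```python
import json, time

content = {
    'event': {'timestamp': {'timestamp': time.time()}, 'event_type': 1},  # EVENTTYPE_CREATE
    'context_id': {'context_uuid': {'uuid': 'admin'}},                    # field added by the caller
}
print(json.dumps(content))  # this string is what gets published to TOPIC_CONTEXT
```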
diff --git a/src/context/service/Populate.py b/src/context/service/Populate.py
deleted file mode 100644
index ffb739988d163d30e9426da54b990f66015e70a1..0000000000000000000000000000000000000000
--- a/src/context/service/Populate.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Topology
-from context.client.ContextClient import ContextClient
-from context.tests.Objects import (
-    CONNECTION_R1_R3, CONTEXT, TOPOLOGY, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID,
-    LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R3, LINK_R1_R3_ID, LINK_R2_R3, LINK_R2_R3_ID, SERVICE_R1_R2, SERVICE_R1_R3,
-    SERVICE_R2_R3)
-
-def populate(host=None, port=None):
-    client = ContextClient(host=host, port=port)
-
-    client.SetContext(Context(**CONTEXT))
-    client.SetTopology(Topology(**TOPOLOGY))
-    client.SetDevice(Device(**DEVICE_R1))
-    client.SetDevice(Device(**DEVICE_R2))
-    client.SetDevice(Device(**DEVICE_R3))
-
-    client.SetLink(Link(**LINK_R1_R2))
-    client.SetLink(Link(**LINK_R1_R3))
-    client.SetLink(Link(**LINK_R2_R3))
-
-    TOPOLOGY_WITH_DEVICES_AND_LINKS = copy.deepcopy(TOPOLOGY)
-    TOPOLOGY_WITH_DEVICES_AND_LINKS['device_ids'].append(DEVICE_R1_ID)
-    TOPOLOGY_WITH_DEVICES_AND_LINKS['device_ids'].append(DEVICE_R2_ID)
-    TOPOLOGY_WITH_DEVICES_AND_LINKS['device_ids'].append(DEVICE_R3_ID)
-    TOPOLOGY_WITH_DEVICES_AND_LINKS['link_ids'].append(LINK_R1_R2_ID)
-    TOPOLOGY_WITH_DEVICES_AND_LINKS['link_ids'].append(LINK_R1_R3_ID)
-    TOPOLOGY_WITH_DEVICES_AND_LINKS['link_ids'].append(LINK_R2_R3_ID)
-    client.SetTopology(Topology(**TOPOLOGY_WITH_DEVICES_AND_LINKS))
-
-    client.SetService(Service(**SERVICE_R1_R2))
-    client.SetService(Service(**SERVICE_R2_R3))
-
-    client.SetService(Service(**SERVICE_R1_R3))
-    client.SetConnection(Connection(**CONNECTION_R1_R3))
diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py
index dfd0c8773b6a7d2dea7bafa12c12018d62b7cdb8..63e9c611c87709855d1715ea1387e9ef6630faa1 100644
--- a/src/context/service/__main__.py
+++ b/src/context/service/__main__.py
@@ -14,69 +14,53 @@
 
 import logging, signal, sys, threading
 from prometheus_client import start_http_server
-from common.Settings import get_log_level, get_metrics_port, get_setting
-from common.orm.Database import Database
-from common.orm.Factory import get_database_backend
+from common.Settings import get_log_level, get_metrics_port
 from common.message_broker.Factory import get_messagebroker_backend
 from common.message_broker.MessageBroker import MessageBroker
-from context.Config import POPULATE_FAKE_DATA
-from .grpc_server.ContextService import ContextService
-from .rest_server.Resources import RESOURCES
-from .rest_server.RestServer import RestServer
-from .Populate import populate
+from .ContextService import ContextService
+from .database.Engine import Engine
+from .database.models._Base import rebuild_database
+
+LOG_LEVEL = get_log_level()
+logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+LOGGER = logging.getLogger(__name__)
+
+#LOGGER.addHandler(logging.StreamHandler(stream=sys.stderr))
+#LOGGER.setLevel(logging.WARNING)
 
 terminate = threading.Event()
-LOGGER = None
 
-def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name,unused-argument
     LOGGER.warning('Terminate signal received')
     terminate.set()
 
 def main():
-    global LOGGER # pylint: disable=global-statement
-
-    log_level = get_log_level()
-    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
-    LOGGER = logging.getLogger(__name__)
-
+    LOGGER.info('Starting...')
     signal.signal(signal.SIGINT,  signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
-    LOGGER.info('Starting...')
-
     # Start metrics server
     metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
-    # Get database instance
-    database = Database(get_database_backend())
+    # Get Database Engine instance and initialize database, if needed
+    db_engine = Engine.get_engine()
+    if db_engine is None: return -1
+    Engine.create_database(db_engine)
+    rebuild_database(db_engine)
 
     # Get message broker instance
     messagebroker = MessageBroker(get_messagebroker_backend())
 
     # Starting context service
-    grpc_service = ContextService(database, messagebroker)
+    grpc_service = ContextService(db_engine, messagebroker)
     grpc_service.start()
 
-    rest_server = RestServer()
-    for endpoint_name, resource_class, resource_url in RESOURCES:
-        rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,))
-    rest_server.start()
-
-    populate_fake_data = get_setting('POPULATE_FAKE_DATA', default=POPULATE_FAKE_DATA)
-    if isinstance(populate_fake_data, str): populate_fake_data = (populate_fake_data.upper() in {'T', '1', 'TRUE'})
-    if populate_fake_data:
-        LOGGER.info('Populating fake data...')
-        populate(host='127.0.0.1', port=grpc_service.bind_port)
-        LOGGER.info('Fake Data populated')
-
     # Wait for Ctrl+C or termination signal
     while not terminate.wait(timeout=0.1): pass
 
     LOGGER.info('Terminating...')
     grpc_service.stop()
-    rest_server.shutdown()
-    rest_server.join()
 
     LOGGER.info('Bye')
     return 0
diff --git a/src/context/service/database/ConfigModel.py b/src/context/service/database/ConfigModel.py
deleted file mode 100644
index 5c6ef0079a03116d4f67519440d93185b94f2969..0000000000000000000000000000000000000000
--- a/src/context/service/database/ConfigModel.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools, json, logging, operator
-from enum import Enum
-from typing import Dict, List, Optional, Tuple, Type, Union
-from common.orm.Database import Database
-from common.orm.HighLevel import get_object, get_or_create_object, update_or_create_object
-from common.orm.backend.Tools import key_to_str
-from common.orm.fields.EnumeratedField import EnumeratedField
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.IntegerField import IntegerField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.proto.context_pb2 import ConfigActionEnum, ConfigRule
-from common.tools.grpc.Tools import grpc_message_to_json_string
-#from .EndPointModel import EndPointModel, get_endpoint
-from .Tools import fast_hasher, grpc_to_enum, remove_dict_key
-
-LOGGER = logging.getLogger(__name__)
-
-class ORM_ConfigActionEnum(Enum):
-    UNDEFINED = ConfigActionEnum.CONFIGACTION_UNDEFINED
-    SET       = ConfigActionEnum.CONFIGACTION_SET
-    DELETE    = ConfigActionEnum.CONFIGACTION_DELETE
-
-grpc_to_enum__config_action = functools.partial(
-    grpc_to_enum, ConfigActionEnum, ORM_ConfigActionEnum)
-
-class ConfigModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-
-    def delete(self) -> None:
-        db_config_rule_pks = self.references(ConfigRuleModel)
-        for pk,_ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete()
-        super().delete()
-
-    def dump(self) -> List[Dict]:
-        db_config_rule_pks = self.references(ConfigRuleModel)
-        config_rules = [ConfigRuleModel(self.database, pk).dump(include_position=True) for pk,_ in db_config_rule_pks]
-        config_rules = sorted(config_rules, key=operator.itemgetter('position'))
-        return [remove_dict_key(config_rule, 'position') for config_rule in config_rules]
-
-class ConfigRuleCustomModel(Model): # pylint: disable=abstract-method
-    key = StringField(required=True, allow_empty=False)
-    value = StringField(required=True, allow_empty=False)
-
-    def dump(self) -> Dict: # pylint: disable=arguments-differ
-        return {'custom': {'resource_key': self.key, 'resource_value': self.value}}
-
-class ConfigRuleAclModel(Model): # pylint: disable=abstract-method
-    # TODO: improve definition of fields in ConfigRuleAclModel
-    # To simplify, endpoint encoded as JSON-string directly; otherwise causes circular dependencies
-    #endpoint_fk = ForeignKeyField(EndPointModel)
-    endpoint_id = StringField(required=True, allow_empty=False)
-    # To simplify, ACL rule is encoded as a JSON-string directly
-    acl_data = StringField(required=True, allow_empty=False)
-
-    def dump(self) -> Dict: # pylint: disable=arguments-differ
-        #json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id()
-        json_endpoint_id = json.loads(self.endpoint_id)
-        json_acl_rule_set = json.loads(self.acl_data)
-        return {'acl': {'endpoint_id': json_endpoint_id, 'rule_set': json_acl_rule_set}}
-
-# enum values should match name of field in ConfigRuleModel
-class ConfigRuleKindEnum(Enum):
-    CUSTOM = 'custom'
-    ACL    = 'acl'
-
-Union_SpecificConfigRule = Union[
-    ConfigRuleCustomModel, ConfigRuleAclModel
-]
-
-class ConfigRuleModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    config_fk = ForeignKeyField(ConfigModel)
-    kind = EnumeratedField(ConfigRuleKindEnum)
-    position = IntegerField(min_value=0, required=True)
-    action = EnumeratedField(ORM_ConfigActionEnum, required=True)
-    config_rule_custom_fk = ForeignKeyField(ConfigRuleCustomModel, required=False)
-    config_rule_acl_fk    = ForeignKeyField(ConfigRuleAclModel, required=False)
-
-    def delete(self) -> None:
-        field_name = 'config_rule_{:s}_fk'.format(str(self.kind.value))
-        specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None)
-        if specific_fk_value is None:
-            raise Exception('Unable to find config_rule key for field_name({:s})'.format(field_name))
-        specific_fk_class = getattr(ConfigRuleModel, field_name, None)
-        foreign_model_class : Model = specific_fk_class.foreign_model
-        super().delete()
-        get_object(self.database, foreign_model_class, str(specific_fk_value)).delete()
-
-    def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ
-        field_name = 'config_rule_{:s}_fk'.format(str(self.kind.value))
-        specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None)
-        if specific_fk_value is None:
-            raise Exception('Unable to find config_rule key for field_name({:s})'.format(field_name))
-        specific_fk_class = getattr(ConfigRuleModel, field_name, None)
-        foreign_model_class : Model = specific_fk_class.foreign_model
-        config_rule : Union_SpecificConfigRule = get_object(self.database, foreign_model_class, str(specific_fk_value))
-        result = config_rule.dump()
-        result['action'] = self.action.value
-        if include_position: result['position'] = self.position
-        return result
-
-Tuple_ConfigRuleSpecs = Tuple[Type, str, Dict, ConfigRuleKindEnum]
-
-def parse_config_rule_custom(database : Database, grpc_config_rule) -> Tuple_ConfigRuleSpecs:
-    config_rule_class = ConfigRuleCustomModel
-    str_config_rule_id = grpc_config_rule.custom.resource_key
-    config_rule_data = {
-        'key'  : grpc_config_rule.custom.resource_key,
-        'value': grpc_config_rule.custom.resource_value,
-    }
-    return config_rule_class, str_config_rule_id, config_rule_data, ConfigRuleKindEnum.CUSTOM
-
-def parse_config_rule_acl(database : Database, grpc_config_rule) -> Tuple_ConfigRuleSpecs:
-    config_rule_class = ConfigRuleAclModel
-    grpc_endpoint_id = grpc_config_rule.acl.endpoint_id
-    grpc_rule_set = grpc_config_rule.acl.rule_set
-    device_uuid = grpc_endpoint_id.device_id.device_uuid.uuid
-    endpoint_uuid = grpc_endpoint_id.endpoint_uuid.uuid
-    str_endpoint_key = '/'.join([device_uuid, endpoint_uuid])
-    #str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id)
-    str_config_rule_id = ':'.join([str_endpoint_key, grpc_rule_set.name])
-    config_rule_data = {
-        #'endpoint_fk': db_endpoint,
-        'endpoint_id': grpc_message_to_json_string(grpc_endpoint_id),
-        'acl_data': grpc_message_to_json_string(grpc_rule_set),
-    }
-    return config_rule_class, str_config_rule_id, config_rule_data, ConfigRuleKindEnum.ACL
-
-CONFIGRULE_PARSERS = {
-    'custom': parse_config_rule_custom,
-    'acl'   : parse_config_rule_acl,
-}
-
-Union_ConfigRuleModel = Union[
-    ConfigRuleCustomModel, ConfigRuleAclModel,
-]
-
-def set_config_rule(
-    database : Database, db_config : ConfigModel, grpc_config_rule : ConfigRule, position : int
-) -> Tuple[Union_ConfigRuleModel, bool]:
-    grpc_config_rule_kind = str(grpc_config_rule.WhichOneof('config_rule'))
-    parser = CONFIGRULE_PARSERS.get(grpc_config_rule_kind)
-    if parser is None:
-        raise NotImplementedError('ConfigRule of kind {:s} is not implemented: {:s}'.format(
-            grpc_config_rule_kind, grpc_message_to_json_string(grpc_config_rule)))
-
-    # create specific ConfigRule
-    config_rule_class, str_config_rule_id, config_rule_data, config_rule_kind = parser(database, grpc_config_rule)
-    str_config_rule_key_hash = fast_hasher(':'.join([config_rule_kind.value, str_config_rule_id]))
-    str_config_rule_key = key_to_str([db_config.pk, str_config_rule_key_hash], separator=':')
-    result : Tuple[Union_ConfigRuleModel, bool] = update_or_create_object(
-        database, config_rule_class, str_config_rule_key, config_rule_data)
-    db_specific_config_rule, updated = result
-
-    # create generic ConfigRule
-    config_rule_fk_field_name = 'config_rule_{:s}_fk'.format(config_rule_kind.value)
-    config_rule_data = {
-        'config_fk': db_config, 'kind': config_rule_kind, 'position': position,
-        'action': ORM_ConfigActionEnum.SET,
-        config_rule_fk_field_name: db_specific_config_rule
-    }
-    result : Tuple[ConfigRuleModel, bool] = update_or_create_object(
-        database, ConfigRuleModel, str_config_rule_key, config_rule_data)
-    db_config_rule, updated = result
-
-    return db_config_rule, updated
-
-def delete_config_rule(
-    database : Database, db_config : ConfigModel, grpc_config_rule : ConfigRule
-) -> None:
-    grpc_config_rule_kind = str(grpc_config_rule.WhichOneof('config_rule'))
-    parser = CONFIGRULE_PARSERS.get(grpc_config_rule_kind)
-    if parser is None:
-        raise NotImplementedError('ConfigRule of kind {:s} is not implemented: {:s}'.format(
-            grpc_config_rule_kind, grpc_message_to_json_string(grpc_config_rule)))
-
-    # delete generic config rules; self deletes specific config rule
-    _, str_config_rule_id, _, config_rule_kind = parser(database, grpc_config_rule)
-    str_config_rule_key_hash = fast_hasher(':'.join([config_rule_kind.value, str_config_rule_id]))
-    str_config_rule_key = key_to_str([db_config.pk, str_config_rule_key_hash], separator=':')
-    db_config_rule : Optional[ConfigRuleModel] = get_object(
-        database, ConfigRuleModel, str_config_rule_key, raise_if_not_found=False)
-    if db_config_rule is None: return
-    db_config_rule.delete()
-
-def update_config(
-    database : Database, db_parent_pk : str, config_name : str, grpc_config_rules
-) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]:
-
-    str_config_key = key_to_str([config_name, db_parent_pk], separator=':')
-    result : Tuple[ConfigModel, bool] = get_or_create_object(database, ConfigModel, str_config_key)
-    db_config, created = result
-
-    db_objects = [(db_config, created)]
-
-    for position,grpc_config_rule in enumerate(grpc_config_rules):
-        action = grpc_to_enum__config_action(grpc_config_rule.action)
-
-        if action == ORM_ConfigActionEnum.SET:
-            result : Tuple[ConfigRuleModel, bool] = set_config_rule(
-                database, db_config, grpc_config_rule, position)
-            db_config_rule, updated = result
-            db_objects.append((db_config_rule, updated))
-        elif action == ORM_ConfigActionEnum.DELETE:
-            delete_config_rule(database, db_config, grpc_config_rule)
-        else:
-            msg = 'Unsupported Action({:s}) for ConfigRule({:s})'
-            str_action = str(ConfigActionEnum.Name(action))
-            str_config_rule = grpc_message_to_json_string(grpc_config_rule)
-            raise AttributeError(msg.format(str_action, str_config_rule))
-
-    return db_objects
diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py
new file mode 100644
index 0000000000000000000000000000000000000000..5443e178c0f726be5b55e7955a6dc7b575d9f53a
--- /dev/null
+++ b/src/context/service/database/ConfigRule.py
@@ -0,0 +1,74 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging
+from sqlalchemy import delete
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.orm import Session
+from typing import Dict, List, Optional, Tuple
+from common.proto.context_pb2 import ConfigRule
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from .models.enums.ConfigAction import grpc_to_enum__config_action
+from .models.ConfigRuleModel import ConfigRuleKindEnum, ConfigRuleModel
+from .uuids._Builder import get_uuid_random
+
+LOGGER = logging.getLogger(__name__)
+
+def compose_config_rules_data(
+    config_rules : List[ConfigRule], now : datetime.datetime,
+    device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None
+) -> List[Dict]:
+    dict_config_rules : List[Dict] = list()
+    for position,config_rule in enumerate(config_rules):
+        configrule_uuid = get_uuid_random()
+        str_kind = config_rule.WhichOneof('config_rule')
+        dict_config_rule = {
+            'configrule_uuid': configrule_uuid,
+            'position'       : position,
+            'kind'           : ConfigRuleKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member
+            'action'         : grpc_to_enum__config_action(config_rule.action),
+            'data'           : grpc_message_to_json_string(getattr(config_rule, str_kind, {})),
+            'created_at'     : now,
+            'updated_at'     : now,
+        }
+        if device_uuid  is not None: dict_config_rule['device_uuid' ] = device_uuid
+        if service_uuid is not None: dict_config_rule['service_uuid'] = service_uuid
+        if slice_uuid   is not None: dict_config_rule['slice_uuid'  ] = slice_uuid
+        dict_config_rules.append(dict_config_rule)
+    return dict_config_rules
+
+def upsert_config_rules(
+    session : Session, config_rules : List[Dict],
+    device_uuid : Optional[str] = None, service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None,
+) -> List[Tuple[datetime.datetime, datetime.datetime]]:
+    # TODO: do not delete all rules; just add-remove as needed
+    stmt = delete(ConfigRuleModel)
+    if device_uuid  is not None: stmt = stmt.where(ConfigRuleModel.device_uuid  == device_uuid )
+    if service_uuid is not None: stmt = stmt.where(ConfigRuleModel.service_uuid == service_uuid)
+    if slice_uuid   is not None: stmt = stmt.where(ConfigRuleModel.slice_uuid   == slice_uuid  )
+    session.execute(stmt)
+
+    configrule_updates = []
+    if len(config_rules) > 0:
+        stmt = insert(ConfigRuleModel).values(config_rules)
+        #stmt = stmt.on_conflict_do_update(
+        #    index_elements=[ConfigRuleModel.configrule_uuid],
+        #    set_=dict(
+        #        updated_at = stmt.excluded.updated_at,
+        #    )
+        #)
+        stmt = stmt.returning(ConfigRuleModel.created_at, ConfigRuleModel.updated_at)
+        configrule_updates = session.execute(stmt).fetchall()
+
+    return configrule_updates
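
`compose_config_rules_data()` turns protobuf `ConfigRule` messages into plain row dicts, and `upsert_config_rules()` writes them inside a caller-provided `Session`. A minimal sketch of how a caller (e.g., a device-oriented setter) might combine the two, in the `run_transaction` style used by the other modules in this directory; it assumes `db_engine` was obtained through `Engine.get_engine()` and uses placeholder resource values and a placeholder `device_uuid`:

    # Sketch only: wires compose_config_rules_data()/upsert_config_rules() together.
    import datetime
    from sqlalchemy.orm import Session, sessionmaker
    from sqlalchemy_cockroachdb import run_transaction
    from common.proto.context_pb2 import ConfigActionEnum, ConfigRule
    from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules

    config_rule = ConfigRule()
    config_rule.action = ConfigActionEnum.CONFIGACTION_SET
    config_rule.custom.resource_key   = '/interface[eth0]/mtu'   # illustrative values
    config_rule.custom.resource_value = '1500'

    now = datetime.datetime.utcnow()
    device_uuid = '69b5bde2-...'  # placeholder; normally derived via the uuids helpers
    rules_data = compose_config_rules_data([config_rule], now, device_uuid=device_uuid)

    def callback(session : Session) -> None:
        # Replaces all rules bound to device_uuid, per the TODO above.
        upsert_config_rules(session, rules_data, device_uuid=device_uuid)

    run_transaction(sessionmaker(bind=db_engine), callback)
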
diff --git a/src/context/service/database/Connection.py b/src/context/service/database/Connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d6d941cbe9cbe21c776a65398cd3ad3bb2073cd
--- /dev/null
+++ b/src/context/service/database/Connection.py
@@ -0,0 +1,147 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging, re
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.engine import Engine
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, List, Optional, Tuple
+from common.proto.context_pb2 import Connection, ConnectionId, ServiceId
+from common.method_wrappers.ServiceExceptions import NotFoundException
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Connection import json_connection_id
+from .models.ConnectionModel import ConnectionEndPointModel, ConnectionModel, ConnectionSubServiceModel
+from .uuids.Connection import connection_get_uuid
+from .uuids.EndPoint import endpoint_get_uuid
+from .uuids.Service import service_get_uuid
+
+LOGGER = logging.getLogger(__name__)
+
+def connection_list_ids(db_engine : Engine, request : ServiceId) -> List[Dict]:
+    _,service_uuid = service_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[ConnectionModel] = session.query(ConnectionModel).filter_by(service_uuid=service_uuid).all()
+        return [obj.dump_id() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def connection_list_objs(db_engine : Engine, request : ServiceId) -> List[Dict]:
+    _,service_uuid = service_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[ConnectionModel] = session.query(ConnectionModel).filter_by(service_uuid=service_uuid).all()
+        return [obj.dump() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def connection_get(db_engine : Engine, request : ConnectionId) -> Dict:
+    connection_uuid = connection_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> Optional[Dict]:
+        obj : Optional[ConnectionModel] = session.query(ConnectionModel)\
+            .filter_by(connection_uuid=connection_uuid).one_or_none()
+        return None if obj is None else obj.dump()
+    obj = run_transaction(sessionmaker(bind=db_engine), callback)
+    if obj is None:
+        raise NotFoundException('Connection', request.connection_uuid.uuid, extra_details=[
+            'connection_uuid generated was: {:s}'.format(connection_uuid),
+        ])
+    return obj
+
+def connection_set(db_engine : Engine, request : Connection) -> Tuple[Dict, bool]:
+    connection_uuid = connection_get_uuid(request.connection_id, allow_random=True)
+    _,service_uuid = service_get_uuid(request.service_id, allow_random=False)
+    settings = grpc_message_to_json_string(request.settings)
+
+    now = datetime.datetime.utcnow()
+
+    connection_data = [{
+        'connection_uuid': connection_uuid,
+        'service_uuid'   : service_uuid,
+        'settings'       : settings,
+        'created_at'     : now,
+        'updated_at'     : now,
+    }]
+
+    connection_endpoints_data : List[Dict] = list()
+    for position,endpoint_id in enumerate(request.path_hops_endpoint_ids):
+        _, _, endpoint_uuid = endpoint_get_uuid(endpoint_id, allow_random=False)
+        connection_endpoints_data.append({
+            'connection_uuid': connection_uuid,
+            'endpoint_uuid'  : endpoint_uuid,
+            'position'       : position,
+        })
+
+    connection_subservices_data : List[Dict] = list()
+    for service_id in request.sub_service_ids:
+        _, subservice_uuid = service_get_uuid(service_id, allow_random=False)
+        connection_subservices_data.append({
+            'connection_uuid': connection_uuid,
+            'subservice_uuid': subservice_uuid,
+        })
+
+    def callback(session : Session) -> bool:
+        stmt = insert(ConnectionModel).values(connection_data)
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[ConnectionModel.connection_uuid],
+            set_=dict(
+                settings   = stmt.excluded.settings,
+                updated_at = stmt.excluded.updated_at,
+            )
+        )
+        stmt = stmt.returning(ConnectionModel.created_at, ConnectionModel.updated_at)
+        created_at,updated_at = session.execute(stmt).fetchone()
+        updated = updated_at > created_at
+
+        # TODO: manage updates to existing connection endpoints
+        if len(connection_endpoints_data) > 0:
+            stmt = insert(ConnectionEndPointModel).values(connection_endpoints_data)
+            stmt = stmt.on_conflict_do_nothing(
+                index_elements=[ConnectionEndPointModel.connection_uuid, ConnectionEndPointModel.endpoint_uuid]
+            )
+            try:
+                session.execute(stmt)
+            except IntegrityError as e:
+                str_args = ''.join(e.args).replace('\n', ' ')
+                pattern_fkv = \
+                    r'\(psycopg2.errors.ForeignKeyViolation\) '\
+                    r'insert on table \"([^\"]+)\" violates foreign key constraint '\
+                    r'.+DETAIL\:  Key \([^\)]+\)\=\([\'\"]*([^\)\'\"]+)[\'\"]*\) is not present in table \"([^\"]+)\"'
+                m_fkv = re.match(pattern_fkv, str_args)
+                if m_fkv is not None:
+                    insert_table, primary_key, origin_table = m_fkv.groups()
+                    raise NotFoundException(origin_table, primary_key, extra_details=[
+                        'while inserting in table "{:s}"'.format(insert_table)
+                    ]) from e
+                else:
+                    raise
+
+        # TODO: manage updates to existing connection sub-services
+        if len(connection_subservices_data) > 0:
+            stmt = insert(ConnectionSubServiceModel).values(connection_subservices_data)
+            stmt = stmt.on_conflict_do_nothing(
+                index_elements=[ConnectionSubServiceModel.connection_uuid, ConnectionSubServiceModel.subservice_uuid]
+            )
+            session.execute(stmt)
+
+        return updated
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_connection_id(connection_uuid),updated
+
+def connection_delete(db_engine : Engine, request : ConnectionId) -> Tuple[Dict, bool]:
+    connection_uuid = connection_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> bool:
+        num_deleted = session.query(ConnectionModel).filter_by(connection_uuid=connection_uuid).delete()
+        return num_deleted > 0
+    deleted = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_connection_id(connection_uuid),deleted
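
When inserting connection endpoints, `connection_set()` converts psycopg2 `ForeignKeyViolation` errors into a `NotFoundException` by regex-parsing the exception text. Below is a small standalone check of that pattern against a representative message; the sample string is illustrative, modeled on the usual psycopg2/CockroachDB wording, not captured from a real run:

    import re

    pattern_fkv = \
        r'\(psycopg2.errors.ForeignKeyViolation\) '\
        r'insert on table \"([^\"]+)\" violates foreign key constraint '\
        r'.+DETAIL\:  Key \([^\)]+\)\=\([\'\"]*([^\)\'\"]+)[\'\"]*\) is not present in table \"([^\"]+)\"'

    sample = (
        '(psycopg2.errors.ForeignKeyViolation) insert on table "connection_endpoint" '
        'violates foreign key constraint "fk_endpoint" DETAIL:  Key (endpoint_uuid)=(1234) '
        'is not present in table "endpoint".'
    )

    m_fkv = re.match(pattern_fkv, sample)
    insert_table, primary_key, origin_table = m_fkv.groups()
    print(insert_table, primary_key, origin_table)  # -> connection_endpoint 1234 endpoint
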
diff --git a/src/context/service/database/ConnectionModel.py b/src/context/service/database/ConnectionModel.py
deleted file mode 100644
index 4cbed43a40f3538633216f09060f8a2483fe5e1f..0000000000000000000000000000000000000000
--- a/src/context/service/database/ConnectionModel.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, operator
-from typing import Dict, List, Optional, Set, Tuple, Union
-from common.orm.Database import Database
-from common.orm.backend.Tools import key_to_str
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.IntegerField import IntegerField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.orm.HighLevel import get_object, get_or_create_object, get_related_objects, update_or_create_object
-from common.proto.context_pb2 import EndPointId
-from .EndPointModel import EndPointModel
-from .ServiceModel import ServiceModel
-from .Tools import remove_dict_key
-
-LOGGER = logging.getLogger(__name__)
-
-class PathModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-
-    def delete(self) -> None:
-        for db_path_hop_pk,_ in self.references(PathHopModel):
-            PathHopModel(self.database, db_path_hop_pk).delete()
-        super().delete()
-
-    def dump(self) -> List[Dict]:
-        db_path_hop_pks = self.references(PathHopModel)
-        path_hops = [PathHopModel(self.database, pk).dump(include_position=True) for pk,_ in db_path_hop_pks]
-        path_hops = sorted(path_hops, key=operator.itemgetter('position'))
-        return [remove_dict_key(path_hop, 'position') for path_hop in path_hops]
-
-class PathHopModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    path_fk = ForeignKeyField(PathModel)
-    position = IntegerField(min_value=0, required=True)
-    endpoint_fk = ForeignKeyField(EndPointModel)
-
-    def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ
-        db_endpoint : EndPointModel = EndPointModel(self.database, self.endpoint_fk)
-        result = db_endpoint.dump_id()
-        if include_position: result['position'] = self.position
-        return result
-
-class ConnectionModel(Model):
-    pk = PrimaryKeyField()
-    connection_uuid = StringField(required=True, allow_empty=False)
-    service_fk = ForeignKeyField(ServiceModel, required=False)
-    path_fk = ForeignKeyField(PathModel, required=True)
-
-    def delete(self) -> None:
-        # pylint: disable=import-outside-toplevel
-        from .RelationModels import ConnectionSubServiceModel
-
-        # Do not remove sub-services automatically. They are supported by real services, so Service component should
-        # deal with the correct removal workflow to deconfigure the devices.
-        for db_connection_sub_service_pk,_ in self.references(ConnectionSubServiceModel):
-            ConnectionSubServiceModel(self.database, db_connection_sub_service_pk).delete()
-
-        super().delete()
-        PathModel(self.database, self.path_fk).delete()
-
-    def dump_id(self) -> Dict:
-        return {
-            'connection_uuid': {'uuid': self.connection_uuid},
-        }
-
-    def dump_path_hops_endpoint_ids(self) -> List[Dict]:
-        return PathModel(self.database, self.path_fk).dump()
-
-    def dump_sub_service_ids(self) -> List[Dict]:
-        from .RelationModels import ConnectionSubServiceModel # pylint: disable=import-outside-toplevel
-        db_sub_services = get_related_objects(self, ConnectionSubServiceModel, 'sub_service_fk')
-        return [db_sub_service.dump_id() for db_sub_service in sorted(db_sub_services, key=operator.attrgetter('pk'))]
-
-    def dump(self, include_path=True, include_sub_service_ids=True) -> Dict: # pylint: disable=arguments-differ
-        result = {'connection_id': self.dump_id()}
-        if self.service_fk is not None:
-            result['service_id'] = ServiceModel(self.database, self.service_fk).dump_id()
-        if include_path: result['path_hops_endpoint_ids'] = self.dump_path_hops_endpoint_ids()
-        if include_sub_service_ids: result['sub_service_ids'] = self.dump_sub_service_ids()
-        return result
-
-def set_path_hop(
-        database : Database, db_path : PathModel, position : int, db_endpoint : EndPointModel
-    ) -> Tuple[PathHopModel, bool]:
-
-    str_path_hop_key = key_to_str([db_path.pk, db_endpoint.pk], separator=':')
-    result : Tuple[PathHopModel, bool] = update_or_create_object(database, PathHopModel, str_path_hop_key, {
-        'path_fk': db_path, 'position': position, 'endpoint_fk': db_endpoint})
-    db_path_hop, updated = result
-    return db_path_hop, updated
-
-def delete_path_hop(
-        database : Database, db_path : PathModel, db_path_hop_pk : str
-    ) -> None:
-
-    db_path_hop : Optional[PathHopModel] = get_object(database, PathHopModel, db_path_hop_pk, raise_if_not_found=False)
-    if db_path_hop is None: return
-    db_path_hop.delete()
-
-def delete_all_path_hops(
-        database : Database, db_path : PathHopModel
-    ) -> None:
-
-    db_path_hop_pks = db_path.references(PathHopModel)
-    for pk,_ in db_path_hop_pks: PathHopModel(database, pk).delete()
-
-def set_path(
-        database : Database, connection_uuid : str, raw_endpoint_ids : List[EndPointId], path_name : str = ''
-    ) -> List[Union[PathModel, PathHopModel]]:
-
-    str_path_key = connection_uuid if len(path_name) == 0 else key_to_str([connection_uuid, path_name], separator=':')
-    result : Tuple[PathModel, bool] = get_or_create_object(database, PathModel, str_path_key)
-    db_path, created = result # pylint: disable=unused-variable
-
-    db_path_hop_pks : Set[str] = set(map(operator.itemgetter(0), db_path.references(PathHopModel)))
-    db_objects : List[Tuple[Union[PathModel, PathHopModel], bool]] = [db_path]
-
-    for position,endpoint_id in enumerate(raw_endpoint_ids):
-        endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
-        endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
-        endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
-        endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-
-        str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid])
-        if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-            str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-            str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-
-        db_endpoint : EndPointModel = get_object(database, EndPointModel, str_endpoint_key)
-
-        result : Tuple[PathHopModel, bool] = set_path_hop(database, db_path, position, db_endpoint)
-        db_path_hop, updated = result # pylint: disable=unused-variable
-        db_objects.append(db_path_hop)
-        db_path_hop_pks.discard(db_path_hop.instance_key)
-
-    for db_path_hop_pk in db_path_hop_pks: delete_path_hop(database, db_path, db_path_hop_pk)
-
-    return db_objects
diff --git a/src/context/service/database/Constraint.py b/src/context/service/database/Constraint.py
new file mode 100644
index 0000000000000000000000000000000000000000..2880c05a85dbde7c3af87d6766375862767611a7
--- /dev/null
+++ b/src/context/service/database/Constraint.py
@@ -0,0 +1,69 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging
+from sqlalchemy import delete
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.orm import Session
+from typing import Dict, List, Optional, Tuple
+from common.proto.context_pb2 import Constraint
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from .models.ConstraintModel import ConstraintKindEnum, ConstraintModel
+from .uuids._Builder import get_uuid_random
+
+LOGGER = logging.getLogger(__name__)
+
+def compose_constraints_data(
+    constraints : List[Constraint], now : datetime.datetime,
+    service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None
+) -> List[Dict]:
+    dict_constraints : List[Dict] = list()
+    for position,constraint in enumerate(constraints):
+        str_kind = constraint.WhichOneof('constraint')
+        dict_constraint = {
+            'constraint_uuid': get_uuid_random(),
+            'position'       : position,
+            'kind'           : ConstraintKindEnum._member_map_.get(str_kind.upper()), # pylint: disable=no-member
+            'data'           : grpc_message_to_json_string(getattr(constraint, str_kind, {})),
+            'created_at'     : now,
+            'updated_at'     : now,
+        }
+        if service_uuid is not None: dict_constraint['service_uuid'] = service_uuid
+        if slice_uuid   is not None: dict_constraint['slice_uuid'  ] = slice_uuid
+        dict_constraints.append(dict_constraint)
+    return dict_constraints
+
+def upsert_constraints(
+    session : Session, constraints : List[Dict],
+    service_uuid : Optional[str] = None, slice_uuid : Optional[str] = None
+) -> List[Tuple[datetime.datetime, datetime.datetime]]:
+    # TODO: do not delete all constraints; just add-remove as needed
+    stmt = delete(ConstraintModel)
+    if service_uuid is not None: stmt = stmt.where(ConstraintModel.service_uuid == service_uuid)
+    if slice_uuid   is not None: stmt = stmt.where(ConstraintModel.slice_uuid   == slice_uuid  )
+    session.execute(stmt)
+
+    constraint_updates = []
+    if len(constraints) > 0:
+        stmt = insert(ConstraintModel).values(constraints)
+        #stmt = stmt.on_conflict_do_update(
+    #    index_elements=[ConstraintModel.constraint_uuid],
+        #    set_=dict(
+        #        updated_at = stmt.excluded.updated_at,
+        #    )
+        #)
+        stmt = stmt.returning(ConstraintModel.created_at, ConstraintModel.updated_at)
+        constraint_updates = session.execute(stmt).fetchall()
+
+    return constraint_updates
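
Constraints follow the same pattern as config rules: `compose_constraints_data()` flattens the protobuf oneof into row dicts, and `upsert_constraints()` replaces all rows for the parent service/slice in one shot. A short sketch of the composition step, assuming the new `ConstraintKindEnum` keeps a `CUSTOM` member like the deleted one and using placeholder values:

    import datetime
    from common.proto.context_pb2 import Constraint
    from context.service.database.Constraint import compose_constraints_data

    constraint = Constraint()
    constraint.custom.constraint_type  = 'bandwidth[gbps]'  # illustrative values
    constraint.custom.constraint_value = '10.0'

    now = datetime.datetime.utcnow()
    rows = compose_constraints_data([constraint], now, service_uuid='b42e2e4c-...')  # placeholder uuid
    # rows[0]['kind'] is ConstraintKindEnum.CUSTOM, and rows[0]['data'] is the JSON
    # rendering of the 'custom' sub-message, roughly:
    # '{"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}'
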
diff --git a/src/context/service/database/ConstraintModel.py b/src/context/service/database/ConstraintModel.py
deleted file mode 100644
index 449dcedeeaf10686ece58607d3a5fa4f4bf6a070..0000000000000000000000000000000000000000
--- a/src/context/service/database/ConstraintModel.py
+++ /dev/null
@@ -1,246 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, operator
-from enum import Enum
-from typing import Dict, List, Optional, Tuple, Type, Union
-from common.orm.Database import Database
-from common.orm.HighLevel import get_object, get_or_create_object, update_or_create_object
-from common.orm.backend.Tools import key_to_str
-from common.orm.fields.BooleanField import BooleanField
-from common.orm.fields.EnumeratedField import EnumeratedField
-from common.orm.fields.FloatField import FloatField
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.IntegerField import IntegerField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.proto.context_pb2 import Constraint
-from common.tools.grpc.Tools import grpc_message_to_json_string
-from .EndPointModel import EndPointModel, get_endpoint
-from .Tools import fast_hasher, remove_dict_key
-
-LOGGER = logging.getLogger(__name__)
-
-class ConstraintsModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-
-    def delete(self) -> None:
-        db_constraint_pks = self.references(ConstraintModel)
-        for pk,_ in db_constraint_pks: ConstraintModel(self.database, pk).delete()
-        super().delete()
-
-    def dump(self) -> List[Dict]:
-        db_constraint_pks = self.references(ConstraintModel)
-        constraints = [ConstraintModel(self.database, pk).dump(include_position=True) for pk,_ in db_constraint_pks]
-        constraints = sorted(constraints, key=operator.itemgetter('position'))
-        return [remove_dict_key(constraint, 'position') for constraint in constraints]
-
-class ConstraintCustomModel(Model): # pylint: disable=abstract-method
-    constraint_type = StringField(required=True, allow_empty=False)
-    constraint_value = StringField(required=True, allow_empty=False)
-
-    def dump(self) -> Dict: # pylint: disable=arguments-differ
-        return {'custom': {'constraint_type': self.constraint_type, 'constraint_value': self.constraint_value}}
-
-class ConstraintEndpointLocationRegionModel(Model): # pylint: disable=abstract-method
-    endpoint_fk = ForeignKeyField(EndPointModel)
-    region = StringField(required=True, allow_empty=False)
-
-    def dump(self) -> Dict: # pylint: disable=arguments-differ
-        json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id()
-        return {'endpoint_location': {'endpoint_id': json_endpoint_id, 'location': {'region': self.region}}}
-
-class ConstraintEndpointLocationGpsPositionModel(Model): # pylint: disable=abstract-method
-    endpoint_fk = ForeignKeyField(EndPointModel)
-    latitude = FloatField(required=True, min_value=-90.0, max_value=90.0)
-    longitude = FloatField(required=True, min_value=-180.0, max_value=180.0)
-
-    def dump(self) -> Dict: # pylint: disable=arguments-differ
-        gps_position = {'latitude': self.latitude, 'longitude': self.longitude}
-        json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id()
-        return {'endpoint_location': {'endpoint_id': json_endpoint_id, 'location': {'gps_position': gps_position}}}
-
-class ConstraintEndpointPriorityModel(Model): # pylint: disable=abstract-method
-    endpoint_fk = ForeignKeyField(EndPointModel)
-    priority = IntegerField(required=True, min_value=0)
-
-    def dump(self) -> Dict: # pylint: disable=arguments-differ
-        json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id()
-        return {'endpoint_priority': {'endpoint_id': json_endpoint_id, 'priority': self.priority}}
-
-class ConstraintSlaAvailabilityModel(Model): # pylint: disable=abstract-method
-    num_disjoint_paths = IntegerField(required=True, min_value=1)
-    all_active = BooleanField(required=True)
-
-    def dump(self) -> Dict: # pylint: disable=arguments-differ
-        return {'sla_availability': {'num_disjoint_paths': self.num_disjoint_paths, 'all_active': self.all_active}}
-
-# enum values should match name of field in ConstraintModel
-class ConstraintKindEnum(Enum):
-    CUSTOM                        = 'custom'
-    ENDPOINT_LOCATION_REGION      = 'ep_loc_region'
-    ENDPOINT_LOCATION_GPSPOSITION = 'ep_loc_gpspos'
-    ENDPOINT_PRIORITY             = 'ep_priority'
-    SLA_AVAILABILITY              = 'sla_avail'
-
-Union_SpecificConstraint = Union[
-    ConstraintCustomModel, ConstraintEndpointLocationRegionModel, ConstraintEndpointLocationGpsPositionModel,
-    ConstraintEndpointPriorityModel, ConstraintSlaAvailabilityModel,
-]
-
-class ConstraintModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    constraints_fk = ForeignKeyField(ConstraintsModel)
-    kind = EnumeratedField(ConstraintKindEnum)
-    position = IntegerField(min_value=0, required=True)
-    constraint_custom_fk        = ForeignKeyField(ConstraintCustomModel, required=False)
-    constraint_ep_loc_region_fk = ForeignKeyField(ConstraintEndpointLocationRegionModel, required=False)
-    constraint_ep_loc_gpspos_fk = ForeignKeyField(ConstraintEndpointLocationGpsPositionModel, required=False)
-    constraint_ep_priority_fk   = ForeignKeyField(ConstraintEndpointPriorityModel, required=False)
-    constraint_sla_avail_fk     = ForeignKeyField(ConstraintSlaAvailabilityModel, required=False)
-
-    def delete(self) -> None:
-        field_name = 'constraint_{:s}_fk'.format(str(self.kind.value))
-        specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None)
-        if specific_fk_value is None:
-            raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name))
-        specific_fk_class = getattr(ConstraintModel, field_name, None)
-        foreign_model_class : Model = specific_fk_class.foreign_model
-        super().delete()
-        get_object(self.database, foreign_model_class, str(specific_fk_value)).delete()
-
-    def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ
-        field_name = 'constraint_{:s}_fk'.format(str(self.kind.value))
-        specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None)
-        if specific_fk_value is None:
-            raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name))
-        specific_fk_class = getattr(ConstraintModel, field_name, None)
-        foreign_model_class : Model = specific_fk_class.foreign_model
-        constraint : Union_SpecificConstraint = get_object(self.database, foreign_model_class, str(specific_fk_value))
-        result = constraint.dump()
-        if include_position: result['position'] = self.position
-        return result
-
-Tuple_ConstraintSpecs = Tuple[Type, str, Dict, ConstraintKindEnum]
-def parse_constraint_custom(database : Database, grpc_constraint) -> Tuple_ConstraintSpecs:
-    constraint_class = ConstraintCustomModel
-    str_constraint_id = grpc_constraint.custom.constraint_type
-    constraint_data = {
-        'constraint_type' : grpc_constraint.custom.constraint_type,
-        'constraint_value': grpc_constraint.custom.constraint_value,
-    }
-    return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.CUSTOM
-
-def parse_constraint_endpoint_location(database : Database, grpc_constraint) -> Tuple_ConstraintSpecs:
-    grpc_endpoint_id = grpc_constraint.endpoint_location.endpoint_id
-    str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id)
-
-    str_constraint_id = str_endpoint_key
-    constraint_data = {'endpoint_fk': db_endpoint}
-
-    grpc_location = grpc_constraint.endpoint_location.location
-    location_kind = str(grpc_location.WhichOneof('location'))
-    if location_kind == 'region':
-        constraint_class = ConstraintEndpointLocationRegionModel
-        constraint_data.update({'region': grpc_location.region})
-        return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_LOCATION_REGION
-    elif location_kind == 'gps_position':
-        constraint_class = ConstraintEndpointLocationGpsPositionModel
-        gps_position = grpc_location.gps_position
-        constraint_data.update({'latitude': gps_position.latitude, 'longitude': gps_position.longitude})
-        return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_LOCATION_GPSPOSITION
-    else:
-        MSG = 'Location kind {:s} in Constraint of kind endpoint_location is not implemented: {:s}'
-        raise NotImplementedError(MSG.format(location_kind, grpc_message_to_json_string(grpc_constraint)))
-
-def parse_constraint_endpoint_priority(database : Database, grpc_constraint) -> Tuple_ConstraintSpecs:
-    grpc_endpoint_id = grpc_constraint.endpoint_priority.endpoint_id
-    str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id)
-
-    constraint_class = ConstraintEndpointPriorityModel
-    str_constraint_id = str_endpoint_key
-    priority = grpc_constraint.endpoint_priority.priority
-    constraint_data = {'endpoint_fk': db_endpoint, 'priority': priority}
-
-    return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_PRIORITY
-
-def parse_constraint_sla_availability(database : Database, grpc_constraint) -> Tuple_ConstraintSpecs:
-    constraint_class = ConstraintSlaAvailabilityModel
-    str_constraint_id = ''
-    constraint_data = {
-        'num_disjoint_paths' : grpc_constraint.sla_availability.num_disjoint_paths,
-        'all_active': grpc_constraint.sla_availability.all_active,
-    }
-    return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.SLA_AVAILABILITY
-
-CONSTRAINT_PARSERS = {
-    'custom'            : parse_constraint_custom,
-    'endpoint_location' : parse_constraint_endpoint_location,
-    'endpoint_priority' : parse_constraint_endpoint_priority,
-    'sla_availability'  : parse_constraint_sla_availability,
-}
-
-Union_ConstraintModel = Union[
-    ConstraintCustomModel, ConstraintEndpointLocationGpsPositionModel, ConstraintEndpointLocationRegionModel,
-    ConstraintEndpointPriorityModel, ConstraintSlaAvailabilityModel
-]
-
-def set_constraint(
-    database : Database, db_constraints : ConstraintsModel, grpc_constraint : Constraint, position : int
-) -> Tuple[Union_ConstraintModel, bool]:
-    grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint'))
-
-    parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind)
-    if parser is None:
-        raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format(
-            grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint)))
-
-    # create specific constraint
-    constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(database, grpc_constraint)
-    str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id]))
-    str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':')
-    result : Tuple[Union_ConstraintModel, bool] = update_or_create_object(
-        database, constraint_class, str_constraint_key, constraint_data)
-    db_specific_constraint, updated = result
-
-    # create generic constraint
-    constraint_fk_field_name = 'constraint_{:s}_fk'.format(constraint_kind.value)
-    constraint_data = {
-        'constraints_fk': db_constraints, 'position': position, 'kind': constraint_kind,
-        constraint_fk_field_name: db_specific_constraint
-    }
-    result : Tuple[ConstraintModel, bool] = update_or_create_object(
-        database, ConstraintModel, str_constraint_key, constraint_data)
-    db_constraint, updated = result
-
-    return db_constraint, updated
-
-def set_constraints(
-    database : Database, db_parent_pk : str, constraints_name : str, grpc_constraints
-) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]:
-
-    str_constraints_key = key_to_str([constraints_name, db_parent_pk], separator=':')
-    result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key)
-    db_constraints, created = result
-
-    db_objects = [(db_constraints, created)]
-
-    for position,grpc_constraint in enumerate(grpc_constraints):
-        result : Tuple[ConstraintModel, bool] = set_constraint(
-            database, db_constraints, grpc_constraint, position)
-        db_constraint, updated = result
-        db_objects.append((db_constraint, updated))
-
-    return db_objects
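
The removed parser table above dispatched on the gRPC oneof field name via WhichOneof(). A minimal standalone sketch of that dispatch idiom, kept separate from the patch; the Constraint proto is real, but the parser bodies and the fields they touch are illustrative assumptions:

# Sketch of the WhichOneof() dispatch idiom (illustrative parser bodies).
from common.proto.context_pb2 import Constraint

def parse_custom(constraint : Constraint) -> dict:
    # 'custom' constraints carry a free-form type/value pair
    return {'type': constraint.custom.constraint_type, 'value': constraint.custom.constraint_value}

def parse_sla_availability(constraint : Constraint) -> dict:
    sla = constraint.sla_availability
    return {'num_disjoint_paths': sla.num_disjoint_paths, 'all_active': sla.all_active}

PARSERS = {
    'custom'          : parse_custom,
    'sla_availability': parse_sla_availability,
}

def parse_constraint(constraint : Constraint) -> dict:
    kind = constraint.WhichOneof('constraint')   # name of the populated oneof member
    parser = PARSERS.get(kind)
    if parser is None:
        raise NotImplementedError('Constraint of kind {:s} is not implemented'.format(str(kind)))
    return parser(constraint)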
diff --git a/src/context/service/database/Context.py b/src/context/service/database/Context.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4fd13b22be9ff816fad044d4eb06779ccf7983d
--- /dev/null
+++ b/src/context/service/database/Context.py
@@ -0,0 +1,103 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, List, Optional, Tuple
+from common.proto.context_pb2 import Context, ContextId
+from common.method_wrappers.ServiceExceptions import NotFoundException
+from common.tools.object_factory.Context import json_context_id
+from .models.ContextModel import ContextModel
+from .uuids.Context import context_get_uuid
+
+LOGGER = logging.getLogger(__name__)
+
+def context_list_ids(db_engine : Engine) -> List[Dict]:
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[ContextModel] = session.query(ContextModel).all()
+        return [obj.dump_id() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def context_list_objs(db_engine : Engine) -> List[Dict]:
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[ContextModel] = session.query(ContextModel).all()
+        return [obj.dump() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def context_get(db_engine : Engine, request : ContextId) -> Dict:
+    context_uuid = context_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> Optional[Dict]:
+        obj : Optional[ContextModel] = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none()
+        return None if obj is None else obj.dump()
+    obj = run_transaction(sessionmaker(bind=db_engine), callback)
+    if obj is None:
+        raw_context_uuid = request.context_uuid.uuid
+        raise NotFoundException('Context', raw_context_uuid, extra_details=[
+            'context_uuid generated was: {:s}'.format(context_uuid)
+        ])
+    return obj
+
+def context_set(db_engine : Engine, request : Context) -> Tuple[Dict, bool]:
+    context_name = request.name
+    if len(context_name) == 0: context_name = request.context_id.context_uuid.uuid
+    context_uuid = context_get_uuid(request.context_id, context_name=context_name, allow_random=True)
+
+    # Ignore request.topology_ids, request.service_ids, and request.slice_ids. They are used
+    # only for retrieving the topologies, services, and slices added to the context. Addition
+    # to the context happens automatically when the topology, service, or slice is created
+    # specifying the associated context.
+
+    if len(request.topology_ids) > 0:   # pragma: no cover
+        LOGGER.warning('Items in field "topology_ids" ignored. This field is used for retrieval purposes only.')
+
+    if len(request.service_ids) > 0:    # pragma: no cover
+        LOGGER.warning('Items in field "service_ids" ignored. This field is used for retrieval purposes only.')
+
+    if len(request.slice_ids) > 0:      # pragma: no cover
+        LOGGER.warning('Items in field "slice_ids" ignored. This field is used for retrieval purposes only.')
+
+    now = datetime.datetime.utcnow()
+    context_data = [{
+        'context_uuid': context_uuid,
+        'context_name': context_name,
+        'created_at'  : now,
+        'updated_at'  : now,
+    }]
+
+    def callback(session : Session) -> bool:
+        stmt = insert(ContextModel).values(context_data)
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[ContextModel.context_uuid],
+            set_=dict(
+                context_name = stmt.excluded.context_name,
+                updated_at   = stmt.excluded.updated_at,
+            )
+        )
+        stmt = stmt.returning(ContextModel.created_at, ContextModel.updated_at)
+        created_at,updated_at = session.execute(stmt).fetchone()
+        return updated_at > created_at
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_context_id(context_uuid),updated
+
+def context_delete(db_engine : Engine, request : ContextId) -> Tuple[Dict, bool]:
+    context_uuid = context_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> bool:
+        num_deleted = session.query(ContextModel).filter_by(context_uuid=context_uuid).delete()
+        return num_deleted > 0
+    deleted = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_context_id(context_uuid),deleted
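
A usage sketch (not part of the patch) for the new functional Context accessors; the engine comes from the Engine helper added below, the 'admin' identifier is illustrative, and a reachable CockroachDB instance is assumed:

# Hypothetical round-trip through the new Context accessors.
from common.proto.context_pb2 import Context, ContextId
from context.service.database.Engine import Engine
from context.service.database.Context import context_set, context_get, context_delete

db_engine = Engine.get_engine()

request = Context()
request.context_id.context_uuid.uuid = 'admin'   # illustrative identifier
request.name = 'admin'

context_id, updated = context_set(db_engine, request)   # upsert; updated is False on first create
obj = context_get(db_engine, ContextId(**context_id))   # dump() of the stored row
_, deleted = context_delete(db_engine, ContextId(**context_id))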
diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/ContextModel.py
deleted file mode 100644
index a12e6669dbd9c506655fd3e2265dab7b25ca90dd..0000000000000000000000000000000000000000
--- a/src/context/service/database/ContextModel.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import Dict, List
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-
-LOGGER = logging.getLogger(__name__)
-
-class ContextModel(Model):
-    pk = PrimaryKeyField()
-    context_uuid = StringField(required=True, allow_empty=False)
-
-    def dump_id(self) -> Dict:
-        return {'context_uuid': {'uuid': self.context_uuid}}
-
-    def dump_service_ids(self) -> List[Dict]:
-        from .ServiceModel import ServiceModel # pylint: disable=import-outside-toplevel
-        db_service_pks = self.references(ServiceModel)
-        return [ServiceModel(self.database, pk).dump_id() for pk,_ in db_service_pks]
-
-    def dump_topology_ids(self) -> List[Dict]:
-        from .TopologyModel import TopologyModel # pylint: disable=import-outside-toplevel
-        db_topology_pks = self.references(TopologyModel)
-        return [TopologyModel(self.database, pk).dump_id() for pk,_ in db_topology_pks]
-
-    def dump(self, include_services=True, include_topologies=True) -> Dict: # pylint: disable=arguments-differ
-        result = {'context_id': self.dump_id()}
-        if include_services: result['service_ids'] = self.dump_service_ids()
-        if include_topologies: result['topology_ids'] = self.dump_topology_ids()
-        return result
diff --git a/src/context/service/database/Device.py b/src/context/service/database/Device.py
new file mode 100644
index 0000000000000000000000000000000000000000..07d1c76061d8b228cf39ddc06d358190bfce48fd
--- /dev/null
+++ b/src/context/service/database/Device.py
@@ -0,0 +1,173 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, List, Optional, Set, Tuple
+from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
+from common.proto.context_pb2 import Device, DeviceId
+from common.tools.object_factory.Device import json_device_id
+from .models.DeviceModel import DeviceModel
+from .models.EndPointModel import EndPointModel
+from .models.TopologyModel import TopologyDeviceModel
+from .models.enums.DeviceDriver import grpc_to_enum__device_driver
+from .models.enums.DeviceOperationalStatus import grpc_to_enum__device_operational_status
+from .models.enums.KpiSampleType import grpc_to_enum__kpi_sample_type
+from .uuids.Device import device_get_uuid
+from .uuids.EndPoint import endpoint_get_uuid
+from .ConfigRule import compose_config_rules_data, upsert_config_rules
+
+LOGGER = logging.getLogger(__name__)
+
+def device_list_ids(db_engine : Engine) -> List[Dict]:
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[DeviceModel] = session.query(DeviceModel).all()
+        return [obj.dump_id() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def device_list_objs(db_engine : Engine) -> List[Dict]:
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[DeviceModel] = session.query(DeviceModel).all()
+        return [obj.dump() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def device_get(db_engine : Engine, request : DeviceId) -> Dict:
+    device_uuid = device_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> Optional[Dict]:
+        obj : Optional[DeviceModel] = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none()
+        return None if obj is None else obj.dump()
+    obj = run_transaction(sessionmaker(bind=db_engine), callback)
+    if obj is None:
+        raw_device_uuid = request.device_uuid.uuid
+        raise NotFoundException('Device', raw_device_uuid, extra_details=[
+            'device_uuid generated was: {:s}'.format(device_uuid)
+        ])
+    return obj
+
+def device_set(db_engine : Engine, request : Device) -> Tuple[Dict, bool]:
+    raw_device_uuid = request.device_id.device_uuid.uuid
+    raw_device_name = request.name
+    device_name = raw_device_uuid if len(raw_device_name) == 0 else raw_device_name
+    device_uuid = device_get_uuid(request.device_id, device_name=device_name, allow_random=True)
+
+    device_type = request.device_type
+    oper_status = grpc_to_enum__device_operational_status(request.device_operational_status)
+    device_drivers = [grpc_to_enum__device_driver(d) for d in request.device_drivers]
+
+    now = datetime.datetime.utcnow()
+
+    topology_uuids : Set[str] = set()
+    related_topologies : List[Dict] = list()
+    endpoints_data : List[Dict] = list()
+    for i, endpoint in enumerate(request.device_endpoints):
+        endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
+        if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid
+        if endpoint_device_uuid not in {raw_device_uuid, device_uuid}:
+            raise InvalidArgumentException(
+                'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid,
+                ['should be == request.device_id.device_uuid.uuid({:s})'.format(raw_device_uuid)]
+            )
+
+        raw_endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+        raw_endpoint_name = endpoint.name
+        endpoint_topology_uuid, endpoint_device_uuid, endpoint_uuid = endpoint_get_uuid(
+            endpoint.endpoint_id, endpoint_name=raw_endpoint_name, allow_random=True)
+        endpoint_name = raw_endpoint_uuid if len(raw_endpoint_name) == 0 else raw_endpoint_name
+
+        kpi_sample_types = [grpc_to_enum__kpi_sample_type(kst) for kst in endpoint.kpi_sample_types]
+
+        endpoints_data.append({
+            'endpoint_uuid'   : endpoint_uuid,
+            'device_uuid'     : endpoint_device_uuid,
+            'topology_uuid'   : endpoint_topology_uuid,
+            'name'            : endpoint_name,
+            'endpoint_type'   : endpoint.endpoint_type,
+            'kpi_sample_types': kpi_sample_types,
+            'created_at'      : now,
+            'updated_at'      : now,
+        })
+
+        if endpoint_topology_uuid not in topology_uuids:
+            related_topologies.append({
+                'topology_uuid': endpoint_topology_uuid,
+                'device_uuid'  : device_uuid,
+            })
+            topology_uuids.add(endpoint_topology_uuid)
+
+    config_rules = compose_config_rules_data(request.device_config.config_rules, now, device_uuid=device_uuid)
+
+    device_data = [{
+        'device_uuid'              : device_uuid,
+        'device_name'              : device_name,
+        'device_type'              : device_type,
+        'device_operational_status': oper_status,
+        'device_drivers'           : device_drivers,
+        'created_at'               : now,
+        'updated_at'               : now,
+    }]
+
+    def callback(session : Session) -> bool:
+        stmt = insert(DeviceModel).values(device_data)
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[DeviceModel.device_uuid],
+            set_=dict(
+                device_name               = stmt.excluded.device_name,
+                device_type               = stmt.excluded.device_type,
+                device_operational_status = stmt.excluded.device_operational_status,
+                device_drivers            = stmt.excluded.device_drivers,
+                updated_at                = stmt.excluded.updated_at,
+            )
+        )
+        stmt = stmt.returning(DeviceModel.created_at, DeviceModel.updated_at)
+        created_at,updated_at = session.execute(stmt).fetchone()
+        updated = updated_at > created_at
+
+        if len(endpoints_data) > 0:
+            stmt = insert(EndPointModel).values(endpoints_data)
+            stmt = stmt.on_conflict_do_update(
+                index_elements=[EndPointModel.endpoint_uuid],
+                set_=dict(
+                    name             = stmt.excluded.name,
+                    endpoint_type    = stmt.excluded.endpoint_type,
+                    kpi_sample_types = stmt.excluded.kpi_sample_types,
+                    updated_at       = stmt.excluded.updated_at,
+                )
+            )
+            stmt = stmt.returning(EndPointModel.created_at, EndPointModel.updated_at)
+            endpoint_updates = session.execute(stmt).fetchall()
+            updated = updated or any([(updated_at > created_at) for created_at,updated_at in endpoint_updates])
+
+        if len(related_topologies) > 0:
+            session.execute(insert(TopologyDeviceModel).values(related_topologies).on_conflict_do_nothing(
+                index_elements=[TopologyDeviceModel.topology_uuid, TopologyDeviceModel.device_uuid]
+            ))
+
+        configrule_updates = upsert_config_rules(session, config_rules, device_uuid=device_uuid)
+        updated = updated or any([(updated_at > created_at) for created_at,updated_at in configrule_updates])
+
+        return updated
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_device_id(device_uuid),updated
+
+def device_delete(db_engine : Engine, request : DeviceId) -> Tuple[Dict, bool]:
+    device_uuid = device_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> bool:
+        num_deleted = session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete()
+        return num_deleted > 0
+    deleted = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_device_id(device_uuid),deleted
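
device_set() detects create-vs-update by comparing the created_at/updated_at pair returned from the upsert. A generic sketch of that idiom, assuming a declarative model exposing those two columns; the helper name is ours, not the module's:

# Sketch of the upsert + RETURNING idiom used above to detect whether a row
# was created (timestamps equal) or updated (updated_at moved forward).
import datetime
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import Session

def upsert_row(session : Session, model, key_column, row : dict) -> bool:
    now = datetime.datetime.utcnow()
    row = dict(row, created_at=now, updated_at=now)
    stmt = insert(model).values([row])
    stmt = stmt.on_conflict_do_update(
        index_elements=[key_column],
        set_=dict(updated_at=stmt.excluded.updated_at),  # only bump the timestamp on conflict
    )
    stmt = stmt.returning(model.created_at, model.updated_at)
    created_at, updated_at = session.execute(stmt).fetchone()
    return updated_at > created_at  # True: the row pre-existed and was refreshed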
diff --git a/src/context/service/database/DeviceModel.py b/src/context/service/database/DeviceModel.py
deleted file mode 100644
index 0ffb97fee51da62802a1f7eb730380ba7a89dc0f..0000000000000000000000000000000000000000
--- a/src/context/service/database/DeviceModel.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools, logging
-from enum import Enum
-from typing import Dict, List
-from common.orm.Database import Database
-from common.orm.backend.Tools import key_to_str
-from common.orm.fields.EnumeratedField import EnumeratedField
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum
-from .ConfigModel import ConfigModel
-from .Tools import grpc_to_enum
-
-LOGGER = logging.getLogger(__name__)
-
-class ORM_DeviceDriverEnum(Enum):
-    UNDEFINED             = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED
-    OPENCONFIG            = DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG
-    TRANSPORT_API         = DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API
-    P4                    = DeviceDriverEnum.DEVICEDRIVER_P4
-    IETF_NETWORK_TOPOLOGY = DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY
-    ONF_TR_352            = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352
-    XR                    = DeviceDriverEnum.DEVICEDRIVER_XR
-
-grpc_to_enum__device_driver = functools.partial(
-    grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum)
-
-class ORM_DeviceOperationalStatusEnum(Enum):
-    UNDEFINED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED
-    DISABLED  = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
-    ENABLED   = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
-
-grpc_to_enum__device_operational_status = functools.partial(
-    grpc_to_enum, DeviceOperationalStatusEnum, ORM_DeviceOperationalStatusEnum)
-
-class DeviceModel(Model):
-    pk = PrimaryKeyField()
-    device_uuid = StringField(required=True, allow_empty=False)
-    device_type = StringField()
-    device_config_fk = ForeignKeyField(ConfigModel)
-    device_operational_status = EnumeratedField(ORM_DeviceOperationalStatusEnum, required=True)
-
-    def delete(self) -> None:
-        # pylint: disable=import-outside-toplevel
-        from .EndPointModel import EndPointModel
-        from .RelationModels import TopologyDeviceModel
-
-        for db_endpoint_pk,_ in self.references(EndPointModel):
-            EndPointModel(self.database, db_endpoint_pk).delete()
-
-        for db_topology_device_pk,_ in self.references(TopologyDeviceModel):
-            TopologyDeviceModel(self.database, db_topology_device_pk).delete()
-
-        for db_driver_pk,_ in self.references(DriverModel):
-            DriverModel(self.database, db_driver_pk).delete()
-
-        super().delete()
-
-        ConfigModel(self.database, self.device_config_fk).delete()
-
-    def dump_id(self) -> Dict:
-        return {'device_uuid': {'uuid': self.device_uuid}}
-
-    def dump_config(self) -> Dict:
-        return ConfigModel(self.database, self.device_config_fk).dump()
-
-    def dump_drivers(self) -> List[int]:
-        db_driver_pks = self.references(DriverModel)
-        return [DriverModel(self.database, pk).dump() for pk,_ in db_driver_pks]
-
-    def dump_endpoints(self) -> List[Dict]:
-        from .EndPointModel import EndPointModel # pylint: disable=import-outside-toplevel
-        db_endpoints_pks = self.references(EndPointModel)
-        return [EndPointModel(self.database, pk).dump() for pk,_ in db_endpoints_pks]
-
-    def dump(   # pylint: disable=arguments-differ
-            self, include_config_rules=True, include_drivers=True, include_endpoints=True
-        ) -> Dict:
-        result = {
-            'device_id': self.dump_id(),
-            'device_type': self.device_type,
-            'device_operational_status': self.device_operational_status.value,
-        }
-        if include_config_rules: result.setdefault('device_config', {})['config_rules'] = self.dump_config()
-        if include_drivers: result['device_drivers'] = self.dump_drivers()
-        if include_endpoints: result['device_endpoints'] = self.dump_endpoints()
-        return result
-
-class DriverModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    device_fk = ForeignKeyField(DeviceModel)
-    driver = EnumeratedField(ORM_DeviceDriverEnum, required=True)
-
-    def dump(self) -> Dict:
-        return self.driver.value
-
-def set_drivers(database : Database, db_device : DeviceModel, grpc_device_drivers):
-    db_device_pk = db_device.pk
-    for driver in grpc_device_drivers:
-        orm_driver = grpc_to_enum__device_driver(driver)
-        str_device_driver_key = key_to_str([db_device_pk, orm_driver.name])
-        db_device_driver = DriverModel(database, str_device_driver_key)
-        db_device_driver.device_fk = db_device
-        db_device_driver.driver = orm_driver
-        db_device_driver.save()
diff --git a/src/context/service/database/EndPointModel.py b/src/context/service/database/EndPointModel.py
deleted file mode 100644
index aeef91b654dfaaaaf14d53f625126632b7303741..0000000000000000000000000000000000000000
--- a/src/context/service/database/EndPointModel.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import Dict, List, Optional, Tuple
-from common.orm.Database import Database
-from common.orm.HighLevel import get_object
-from common.orm.backend.Tools import key_to_str
-from common.orm.fields.EnumeratedField import EnumeratedField
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.proto.context_pb2 import EndPointId
-from .DeviceModel import DeviceModel
-from .KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type
-from .TopologyModel import TopologyModel
-
-LOGGER = logging.getLogger(__name__)
-
-class EndPointModel(Model):
-    pk = PrimaryKeyField()
-    topology_fk = ForeignKeyField(TopologyModel, required=False)
-    device_fk = ForeignKeyField(DeviceModel)
-    endpoint_uuid = StringField(required=True, allow_empty=False)
-    endpoint_type = StringField()
-
-    def delete(self) -> None:
-        for db_kpi_sample_type_pk,_ in self.references(KpiSampleTypeModel):
-            KpiSampleTypeModel(self.database, db_kpi_sample_type_pk).delete()
-        super().delete()
-
-    def dump_id(self) -> Dict:
-        device_id = DeviceModel(self.database, self.device_fk).dump_id()
-        result = {
-            'device_id': device_id,
-            'endpoint_uuid': {'uuid': self.endpoint_uuid},
-        }
-        if self.topology_fk is not None:
-            result['topology_id'] = TopologyModel(self.database, self.topology_fk).dump_id()
-        return result
-
-    def dump_kpi_sample_types(self) -> List[int]:
-        db_kpi_sample_type_pks = self.references(KpiSampleTypeModel)
-        return [KpiSampleTypeModel(self.database, pk).dump() for pk,_ in db_kpi_sample_type_pks]
-
-    def dump(   # pylint: disable=arguments-differ
-            self, include_kpi_sample_types=True
-        ) -> Dict:
-        result = {
-            'endpoint_id': self.dump_id(),
-            'endpoint_type': self.endpoint_type,
-        }
-        if include_kpi_sample_types: result['kpi_sample_types'] = self.dump_kpi_sample_types()
-        return result
-
-class KpiSampleTypeModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    endpoint_fk = ForeignKeyField(EndPointModel)
-    kpi_sample_type = EnumeratedField(ORM_KpiSampleTypeEnum, required=True)
-
-    def dump(self) -> Dict:
-        return self.kpi_sample_type.value
-
-def set_kpi_sample_types(database : Database, db_endpoint : EndPointModel, grpc_endpoint_kpi_sample_types):
-    db_endpoint_pk = db_endpoint.pk
-    for kpi_sample_type in grpc_endpoint_kpi_sample_types:
-        orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type)
-        str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name])
-        db_endpoint_kpi_sample_type = KpiSampleTypeModel(database, str_endpoint_kpi_sample_type_key)
-        db_endpoint_kpi_sample_type.endpoint_fk = db_endpoint
-        db_endpoint_kpi_sample_type.kpi_sample_type = orm_kpi_sample_type
-        db_endpoint_kpi_sample_type.save()
-
-def get_endpoint(
-    database : Database, grpc_endpoint_id : EndPointId,
-    validate_topology_exists : bool = True, validate_device_in_topology : bool = True
-) -> Tuple[str, EndPointModel]:
-    endpoint_uuid                  = grpc_endpoint_id.endpoint_uuid.uuid
-    endpoint_device_uuid           = grpc_endpoint_id.device_id.device_uuid.uuid
-    endpoint_topology_uuid         = grpc_endpoint_id.topology_id.topology_uuid.uuid
-    endpoint_topology_context_uuid = grpc_endpoint_id.topology_id.context_id.context_uuid.uuid
-    str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid])
-
-    if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-        # check topology exists
-        str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-        if validate_topology_exists:
-            from .TopologyModel import TopologyModel
-            get_object(database, TopologyModel, str_topology_key)
-
-        # check device is in topology
-        str_topology_device_key = key_to_str([str_topology_key, endpoint_device_uuid], separator='--')
-        if validate_device_in_topology:
-            from .RelationModels import TopologyDeviceModel
-            get_object(database, TopologyDeviceModel, str_topology_device_key)
-
-        str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-
-    db_endpoint : EndPointModel = get_object(database, EndPointModel, str_endpoint_key)
-    return str_endpoint_key, db_endpoint
diff --git a/src/context/service/database/Engine.py b/src/context/service/database/Engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..a37ec0c1e88f6fd8b7f46fd5f6b0383d32cde2b8
--- /dev/null
+++ b/src/context/service/database/Engine.py
@@ -0,0 +1,60 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, sqlalchemy, sqlalchemy_utils
+from common.Settings import get_setting
+
+LOGGER = logging.getLogger(__name__)
+
+APP_NAME = 'tfs'
+ECHO = False # True: dump SQL commands and transactions as they are executed
+CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
+
+class Engine:
+    @staticmethod
+    def get_engine() -> sqlalchemy.engine.Engine:
+        CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE')
+        CRDB_SQL_PORT  = get_setting('CRDB_SQL_PORT')
+        CRDB_DATABASE  = get_setting('CRDB_DATABASE')
+        CRDB_USERNAME  = get_setting('CRDB_USERNAME')
+        CRDB_PASSWORD  = get_setting('CRDB_PASSWORD')
+        CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
+
+        crdb_uri = CRDB_URI_TEMPLATE.format(
+            CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+
+        try:
+            engine = sqlalchemy.create_engine(
+                crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True)
+        except: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
+            return None
+
+        try:
+            Engine.create_database(engine)
+        except: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to check/create database: {:s}'.format(str(crdb_uri)))
+            return None
+
+        return engine
+
+    @staticmethod
+    def create_database(engine : sqlalchemy.engine.Engine) -> None:
+        if not sqlalchemy_utils.database_exists(engine.url):
+            sqlalchemy_utils.create_database(engine.url)
+
+    @staticmethod
+    def drop_database(engine : sqlalchemy.engine.Engine) -> None:
+        if sqlalchemy_utils.database_exists(engine.url):
+            sqlalchemy_utils.drop_database(engine.url)
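
Engine.get_engine() reads its connection parameters through get_setting(), i.e. from the environment. A sketch of the variables it expects; only the variable names come from the code, the concrete values below are deployment-specific assumptions:

# Illustrative environment for Engine.get_engine(); values are placeholders.
import os

os.environ.setdefault('CRDB_NAMESPACE', 'crdb')
os.environ.setdefault('CRDB_SQL_PORT',  '26257')
os.environ.setdefault('CRDB_DATABASE',  'tfs')
os.environ.setdefault('CRDB_USERNAME',  'tfs')
os.environ.setdefault('CRDB_PASSWORD',  'tfs123')
os.environ.setdefault('CRDB_SSLMODE',   'require')

from context.service.database.Engine import Engine

engine = Engine.get_engine()  # returns None (after logging) when the database is unreachable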
diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d9e80894b47b1e935453e09820134181f5b936c
--- /dev/null
+++ b/src/context/service/database/Link.py
@@ -0,0 +1,127 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, List, Optional, Set, Tuple
+from common.proto.context_pb2 import Link, LinkId
+from common.method_wrappers.ServiceExceptions import NotFoundException
+from common.tools.object_factory.Link import json_link_id
+from .models.LinkModel import LinkModel, LinkEndPointModel
+from .models.TopologyModel import TopologyLinkModel
+from .uuids.EndPoint import endpoint_get_uuid
+from .uuids.Link import link_get_uuid
+
+LOGGER = logging.getLogger(__name__)
+
+def link_list_ids(db_engine : Engine) -> List[Dict]:
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[LinkModel] = session.query(LinkModel).all()
+        return [obj.dump_id() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def link_list_objs(db_engine : Engine) -> List[Dict]:
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[LinkModel] = session.query(LinkModel).all()
+        return [obj.dump() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def link_get(db_engine : Engine, request : LinkId) -> Dict:
+    link_uuid = link_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> Optional[Dict]:
+        obj : Optional[LinkModel] = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none()
+        return None if obj is None else obj.dump()
+    obj = run_transaction(sessionmaker(bind=db_engine), callback)
+    if obj is None:
+        raw_link_uuid = request.link_uuid.uuid
+        raise NotFoundException('Link', raw_link_uuid, extra_details=[
+            'link_uuid generated was: {:s}'.format(link_uuid)
+        ])
+    return obj
+
+def link_set(db_engine : Engine, request : Link) -> Tuple[Dict, bool]:
+    raw_link_uuid = request.link_id.link_uuid.uuid
+    raw_link_name = request.name
+    link_name = raw_link_uuid if len(raw_link_name) == 0 else raw_link_name
+    link_uuid = link_get_uuid(request.link_id, link_name=link_name, allow_random=True)
+
+    now = datetime.datetime.utcnow()
+
+    topology_uuids : Set[str] = set()
+    related_topologies : List[Dict] = list()
+    link_endpoints_data : List[Dict] = list()
+    for endpoint_id in request.link_endpoint_ids:
+        endpoint_topology_uuid, _, endpoint_uuid = endpoint_get_uuid(
+            endpoint_id, allow_random=False)
+
+        link_endpoints_data.append({
+            'link_uuid'    : link_uuid,
+            'endpoint_uuid': endpoint_uuid,
+        })
+
+        if endpoint_topology_uuid not in topology_uuids:
+            related_topologies.append({
+                'topology_uuid': endpoint_topology_uuid,
+                'link_uuid'    : link_uuid,
+            })
+            topology_uuids.add(endpoint_topology_uuid)
+
+    link_data = [{
+        'link_uuid' : link_uuid,
+        'link_name' : link_name,
+        'created_at': now,
+        'updated_at': now,
+    }]
+
+    def callback(session : Session) -> bool:
+        stmt = insert(LinkModel).values(link_data)
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[LinkModel.link_uuid],
+            set_=dict(
+                link_name  = stmt.excluded.link_name,
+                updated_at = stmt.excluded.updated_at,
+            )
+        )
+        stmt = stmt.returning(LinkModel.created_at, LinkModel.updated_at)
+        created_at,updated_at = session.execute(stmt).fetchone()
+        updated = updated_at > created_at
+
+        if len(link_endpoints_data) > 0:
+            # TODO: manage add/remove of endpoints; manage changes in relations with topology
+            stmt = insert(LinkEndPointModel).values(link_endpoints_data)
+            stmt = stmt.on_conflict_do_nothing(
+                index_elements=[LinkEndPointModel.link_uuid, LinkEndPointModel.endpoint_uuid]
+            )
+            session.execute(stmt)
+
+        if len(related_topologies) > 0:
+            session.execute(insert(TopologyLinkModel).values(related_topologies).on_conflict_do_nothing(
+                index_elements=[TopologyLinkModel.topology_uuid, TopologyLinkModel.link_uuid]
+            ))
+
+        return updated
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_link_id(link_uuid),updated
+
+def link_delete(db_engine : Engine, request : LinkId) -> Tuple[Dict, bool]:
+    link_uuid = link_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> bool:
+        num_deleted = session.query(LinkModel).filter_by(link_uuid=link_uuid).delete()
+        return num_deleted > 0
+    deleted = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_link_id(link_uuid),deleted
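
A sketch (not part of the patch) of feeding link_set(); identifiers are illustrative, and since endpoint_get_uuid() is called with allow_random=False, the referenced endpoints must already be stored:

# Illustrative link creation; device/endpoint names are placeholders and the
# endpoints must have been stored beforehand through device_set().
from common.proto.context_pb2 import Link
from context.service.database.Engine import Engine
from context.service.database.Link import link_set

db_engine = Engine.get_engine()

link = Link()
link.link_id.link_uuid.uuid = 'R1/502==R2/501'   # illustrative name-based id
for device_uuid, endpoint_uuid in (('R1', '502'), ('R2', '501')):
    endpoint_id = link.link_endpoint_ids.add()
    endpoint_id.device_id.device_uuid.uuid = device_uuid
    endpoint_id.endpoint_uuid.uuid = endpoint_uuid

link_id, updated = link_set(db_engine, link)  # upserts the link and its topology relations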
diff --git a/src/context/service/database/LinkModel.py b/src/context/service/database/LinkModel.py
deleted file mode 100644
index 8f1d971c3127371e0d9a1a401d885a02269bd8dd..0000000000000000000000000000000000000000
--- a/src/context/service/database/LinkModel.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, operator
-from typing import Dict, List
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.orm.HighLevel import get_related_objects
-
-LOGGER = logging.getLogger(__name__)
-
-class LinkModel(Model):
-    pk = PrimaryKeyField()
-    link_uuid = StringField(required=True, allow_empty=False)
-
-    def delete(self) -> None:
-        #pylint: disable=import-outside-toplevel
-        from .RelationModels import LinkEndPointModel, TopologyLinkModel
-
-        for db_link_endpoint_pk,_ in self.references(LinkEndPointModel):
-            LinkEndPointModel(self.database, db_link_endpoint_pk).delete()
-
-        for db_topology_link_pk,_ in self.references(TopologyLinkModel):
-            TopologyLinkModel(self.database, db_topology_link_pk).delete()
-
-        super().delete()
-
-    def dump_id(self) -> Dict:
-        return {'link_uuid': {'uuid': self.link_uuid}}
-
-    def dump_endpoint_ids(self) -> List[Dict]:
-        from .RelationModels import LinkEndPointModel # pylint: disable=import-outside-toplevel
-        db_endpoints = get_related_objects(self, LinkEndPointModel, 'endpoint_fk')
-        return [db_endpoint.dump_id() for db_endpoint in sorted(db_endpoints, key=operator.attrgetter('pk'))]
-
-    def dump(self) -> Dict:
-        return {
-            'link_id': self.dump_id(),
-            'link_endpoint_ids': self.dump_endpoint_ids(),
-        }
diff --git a/src/context/service/database/PolicyRule.py b/src/context/service/database/PolicyRule.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a37c7d8aaf9e412e330fc6060142ea591eaee0
--- /dev/null
+++ b/src/context/service/database/PolicyRule.py
@@ -0,0 +1,138 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, json
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, List, Optional, Set, Tuple
+from common.proto.policy_pb2 import PolicyRule, PolicyRuleId, PolicyRuleIdList, PolicyRuleList
+from common.method_wrappers.ServiceExceptions import NotFoundException
+from common.tools.grpc.Tools import grpc_message_to_json
+from common.tools.object_factory.PolicyRule import json_policyrule_id
+from context.service.database.uuids.Device import device_get_uuid
+from .models.enums.PolicyRuleState import grpc_to_enum__policyrule_state
+from .models.PolicyRuleModel import PolicyRuleDeviceModel, PolicyRuleKindEnum, PolicyRuleModel
+from .uuids.PolicuRule import policyrule_get_uuid
+from .uuids.Service import service_get_uuid
+
+def policyrule_list_ids(db_engine : Engine) -> List[Dict]:
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all()
+        return [obj.dump_id() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def policyrule_list_objs(db_engine : Engine) -> List[Dict]:
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[PolicyRuleModel] = session.query(PolicyRuleModel).all()
+        return [obj.dump() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def policyrule_get(db_engine : Engine, request : PolicyRuleId) -> PolicyRule:
+    policyrule_uuid = policyrule_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> Optional[Dict]:
+        obj : Optional[PolicyRuleModel] = session.query(PolicyRuleModel)\
+            .filter_by(policyrule_uuid=policyrule_uuid).one_or_none()
+        return None if obj is None else obj.dump()
+    obj = run_transaction(sessionmaker(bind=db_engine), callback)
+    if obj is None:
+        raw_policyrule_uuid = request.uuid.uuid
+        raise NotFoundException('PolicyRule', raw_policyrule_uuid, extra_details=[
+            'policyrule_uuid generated was: {:s}'.format(policyrule_uuid)
+        ])
+    return obj
+
+def policyrule_set(db_engine : Engine, request : PolicyRule) -> Tuple[PolicyRuleId, bool]:
+    policyrule_kind = request.WhichOneof('policy_rule')
+    policyrule_spec = getattr(request, policyrule_kind)
+    policyrule_basic = policyrule_spec.policyRuleBasic
+    policyrule_id = policyrule_basic.policyRuleId
+    policyrule_uuid = policyrule_get_uuid(policyrule_id, allow_random=False)
+
+    policyrule_kind  = PolicyRuleKindEnum._member_map_.get(policyrule_kind.upper()) # pylint: disable=no-member
+    policyrule_state = grpc_to_enum__policyrule_state(policyrule_basic.policyRuleState.policyRuleState)
+    policyrule_state_message = policyrule_basic.policyRuleState.policyRuleStateMessage
+
+    json_policyrule_basic = grpc_message_to_json(policyrule_basic)
+    policyrule_eca_data = json.dumps({
+        'conditionList': json_policyrule_basic.get('conditionList', []),
+        'booleanOperator': json_policyrule_basic['booleanOperator'],
+        'actionList': json_policyrule_basic.get('actionList', []),
+    }, sort_keys=True)
+
+    now = datetime.datetime.utcnow()
+
+    policyrule_data = [{
+        'policyrule_uuid'         : policyrule_uuid,
+        'policyrule_kind'         : policyrule_kind,
+        'policyrule_state'        : policyrule_state,
+        'policyrule_state_message': policyrule_state_message,
+        'policyrule_priority'     : policyrule_basic.priority,
+        'policyrule_eca_data'     : policyrule_eca_data,
+        'created_at'              : now,
+        'updated_at'              : now,
+    }]
+
+    policyrule_service_uuid = None
+    if policyrule_kind == PolicyRuleKindEnum.SERVICE:
+        _,policyrule_service_uuid = service_get_uuid(policyrule_spec.serviceId, allow_random=False)
+        policyrule_data[0]['policyrule_service_uuid'] = policyrule_service_uuid
+
+    device_uuids : Set[str] = set()
+    related_devices : List[Dict] = list()
+    for device_id in policyrule_spec.deviceList:
+        device_uuid = device_get_uuid(device_id, allow_random=False)
+        if device_uuid in device_uuids: continue
+        related_devices.append({
+            'policyrule_uuid': policyrule_uuid,
+            'device_uuid'    : device_uuid,
+        })
+        device_uuids.add(device_uuid)
+
+    def callback(session : Session) -> bool:
+        stmt = insert(PolicyRuleModel).values(policyrule_data)
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[PolicyRuleModel.policyrule_uuid],
+            set_=dict(
+                policyrule_state         = stmt.excluded.policyrule_state,
+                policyrule_state_message = stmt.excluded.policyrule_state_message,
+                policyrule_priority      = stmt.excluded.policyrule_priority,
+                policyrule_eca_data      = stmt.excluded.policyrule_eca_data,
+                updated_at               = stmt.excluded.updated_at,
+            )
+        )
+        stmt = stmt.returning(PolicyRuleModel.created_at, PolicyRuleModel.updated_at)
+        created_at,updated_at = session.execute(stmt).fetchone()
+        updated = updated_at > created_at
+
+        if len(related_devices) > 0:
+            session.execute(insert(PolicyRuleDeviceModel).values(related_devices).on_conflict_do_nothing(
+                index_elements=[PolicyRuleDeviceModel.policyrule_uuid, PolicyRuleDeviceModel.device_uuid]
+            ))
+
+        return updated
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_policyrule_id(policyrule_uuid),updated
+
+def policyrule_delete(db_engine : Engine, request : PolicyRuleId) -> Tuple[Dict, bool]:
+    policyrule_uuid = policyrule_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> bool:
+        num_deleted = session.query(PolicyRuleModel).filter_by(policyrule_uuid=policyrule_uuid).delete()
+        return num_deleted > 0
+    deleted = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_policyrule_id(policyrule_uuid),deleted
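
Every accessor in these modules runs inside run_transaction(), which re-executes the whole callback when CockroachDB reports a retryable serialization error. A minimal standalone sketch of that contract; the URI and table name are assumptions:

# run_transaction() may invoke the callback several times on retry, so the
# callback must confine its side effects to the session it receives.
from sqlalchemy import create_engine, text
from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy_cockroachdb import run_transaction

engine = create_engine('cockroachdb://tfs:tfs123@localhost:26257/tfs')  # illustrative URI

def callback(session : Session) -> int:
    return session.execute(text('SELECT count(*) FROM policyrule')).scalar_one()  # assumed table name

num_policyrules = run_transaction(sessionmaker(bind=engine), callback)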
diff --git a/src/context/service/database/RelationModels.py b/src/context/service/database/RelationModels.py
deleted file mode 100644
index 98b077a774616f69f4ae6a620e53a1592bb4f6e2..0000000000000000000000000000000000000000
--- a/src/context/service/database/RelationModels.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.model.Model import Model
-from .ConnectionModel import ConnectionModel
-from .DeviceModel import DeviceModel
-from .EndPointModel import EndPointModel
-from .LinkModel import LinkModel
-from .ServiceModel import ServiceModel
-from .SliceModel import SliceModel
-from .TopologyModel import TopologyModel
-
-LOGGER = logging.getLogger(__name__)
-
-class ConnectionSubServiceModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    connection_fk = ForeignKeyField(ConnectionModel)
-    sub_service_fk = ForeignKeyField(ServiceModel)
-
-class LinkEndPointModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    link_fk = ForeignKeyField(LinkModel)
-    endpoint_fk = ForeignKeyField(EndPointModel)
-
-class ServiceEndPointModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    service_fk = ForeignKeyField(ServiceModel)
-    endpoint_fk = ForeignKeyField(EndPointModel)
-
-class SliceEndPointModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    slice_fk = ForeignKeyField(SliceModel)
-    endpoint_fk = ForeignKeyField(EndPointModel)
-
-class SliceServiceModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    slice_fk = ForeignKeyField(SliceModel)
-    service_fk = ForeignKeyField(ServiceModel)
-
-class SliceSubSliceModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    slice_fk = ForeignKeyField(SliceModel)
-    sub_slice_fk = ForeignKeyField(SliceModel)
-
-class TopologyDeviceModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    topology_fk = ForeignKeyField(TopologyModel)
-    device_fk = ForeignKeyField(DeviceModel)
-
-class TopologyLinkModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    topology_fk = ForeignKeyField(TopologyModel)
-    link_fk = ForeignKeyField(LinkModel)
diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py
new file mode 100644
index 0000000000000000000000000000000000000000..76a83053587aa8beb44c4d96771c3cfa46945b07
--- /dev/null
+++ b/src/context/service/database/Service.py
@@ -0,0 +1,145 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, List, Optional, Tuple
+from common.proto.context_pb2 import ContextId, Service, ServiceId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Service import json_service_id
+from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules
+from context.service.database.Constraint import compose_constraints_data, upsert_constraints
+from .models.enums.ServiceStatus import grpc_to_enum__service_status
+from .models.enums.ServiceType import grpc_to_enum__service_type
+from .models.ServiceModel import ServiceModel, ServiceEndPointModel
+from .uuids.Context import context_get_uuid
+from .uuids.EndPoint import endpoint_get_uuid
+from .uuids.Service import service_get_uuid
+
+LOGGER = logging.getLogger(__name__)
+
+def service_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
+    context_uuid = context_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all()
+        return [obj.dump_id() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def service_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
+    context_uuid = context_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[ServiceModel] = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all()
+        return [obj.dump() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def service_get(db_engine : Engine, request : ServiceId) -> Dict:
+    _,service_uuid = service_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> Optional[Dict]:
+        obj : Optional[ServiceModel] = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none()
+        return None if obj is None else obj.dump()
+    obj = run_transaction(sessionmaker(bind=db_engine), callback)
+    if obj is None:
+        context_uuid = context_get_uuid(request.context_id, allow_random=False)
+        raw_service_uuid = '{:s}/{:s}'.format(request.context_id.context_uuid.uuid, request.service_uuid.uuid)
+        raise NotFoundException('Service', raw_service_uuid, extra_details=[
+            'context_uuid generated was: {:s}'.format(context_uuid),
+            'service_uuid generated was: {:s}'.format(service_uuid),
+        ])
+    return obj
+
+def service_set(db_engine : Engine, request : Service) -> Tuple[Dict, bool]:
+    raw_context_uuid = request.service_id.context_id.context_uuid.uuid
+    raw_service_uuid = request.service_id.service_uuid.uuid
+    raw_service_name = request.name
+    service_name = raw_service_uuid if len(raw_service_name) == 0 else raw_service_name
+    context_uuid,service_uuid = service_get_uuid(request.service_id, service_name=service_name, allow_random=True)
+
+    service_type = grpc_to_enum__service_type(request.service_type)
+    service_status = grpc_to_enum__service_status(request.service_status.service_status)
+
+    now = datetime.datetime.utcnow()
+
+    service_endpoints_data : List[Dict] = list()
+    for i,endpoint_id in enumerate(request.service_endpoint_ids):
+        endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+        if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid
+        if endpoint_context_uuid not in {raw_context_uuid, context_uuid}:
+            raise InvalidArgumentException(
+                'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
+                endpoint_context_uuid,
+                ['should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(raw_context_uuid)])
+
+        _, _, endpoint_uuid = endpoint_get_uuid(endpoint_id, allow_random=False)
+        service_endpoints_data.append({
+            'service_uuid' : service_uuid,
+            'endpoint_uuid': endpoint_uuid,
+        })
+
+    constraints = compose_constraints_data(request.service_constraints, now, service_uuid=service_uuid)
+    config_rules = compose_config_rules_data(request.service_config.config_rules, now, service_uuid=service_uuid)
+
+    service_data = [{
+        'context_uuid'  : context_uuid,
+        'service_uuid'  : service_uuid,
+        'service_name'  : service_name,
+        'service_type'  : service_type,
+        'service_status': service_status,
+        'created_at'    : now,
+        'updated_at'    : now,
+    }]
+
+    def callback(session : Session) -> bool:
+        stmt = insert(ServiceModel).values(service_data)
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[ServiceModel.service_uuid],
+            set_=dict(
+                service_name   = stmt.excluded.service_name,
+                service_type   = stmt.excluded.service_type,
+                service_status = stmt.excluded.service_status,
+                updated_at     = stmt.excluded.updated_at,
+            )
+        )
+        stmt = stmt.returning(ServiceModel.created_at, ServiceModel.updated_at)
+        created_at,updated_at = session.execute(stmt).fetchone()
+        updated = updated_at > created_at
+
+        if len(service_endpoints_data) > 0:
+            stmt = insert(ServiceEndPointModel).values(service_endpoints_data)
+            stmt = stmt.on_conflict_do_nothing(
+                index_elements=[ServiceEndPointModel.service_uuid, ServiceEndPointModel.endpoint_uuid]
+            )
+            session.execute(stmt)
+
+        constraint_updates = upsert_constraints(session, constraints, service_uuid=service_uuid)
+        updated = updated or any([(updated_at > created_at) for created_at,updated_at in constraint_updates])
+
+        configrule_updates = upsert_config_rules(session, config_rules, service_uuid=service_uuid)
+        updated = updated or any([(updated_at > created_at) for created_at,updated_at in configrule_updates])
+
+        return updated
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_service_id(service_uuid, json_context_id(context_uuid)),updated
+
+def service_delete(db_engine : Engine, request : ServiceId) -> Tuple[Dict, bool]:
+    context_uuid,service_uuid = service_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> bool:
+        num_deleted = session.query(ServiceModel).filter_by(service_uuid=service_uuid).delete()
+        return num_deleted > 0
+    deleted = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_service_id(service_uuid, json_context_id(context_uuid)),deleted
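The upsert-and-compare idiom used by service_set recurs throughout these new modules: rows are inserted with identical created_at/updated_at timestamps, the ON CONFLICT clause refreshes updated_at (plus the mutable columns) while leaving created_at untouched, and RETURNING both timestamps lets the transaction callback report updated_at > created_at, i.e. whether the row pre-existed. A minimal sketch of the idiom, assuming a reachable CockroachDB instance; the ItemModel table and item_set helper are illustrative, not part of this patch:

    # Minimal sketch of the upsert-and-compare idiom; illustrative only.
    import datetime
    from sqlalchemy import Column, DateTime, String
    from sqlalchemy.dialects.postgresql import insert
    from sqlalchemy.engine import Engine
    from sqlalchemy.orm import Session, declarative_base, sessionmaker
    from sqlalchemy_cockroachdb import run_transaction

    _Base = declarative_base()

    class ItemModel(_Base):
        __tablename__ = 'item'
        item_uuid  = Column(String,   primary_key=True)
        item_name  = Column(String,   nullable=False)
        created_at = Column(DateTime, nullable=False)
        updated_at = Column(DateTime, nullable=False)

    def item_set(db_engine : Engine, item_uuid : str, item_name : str) -> bool:
        now = datetime.datetime.utcnow()
        def callback(session : Session) -> bool:
            stmt = insert(ItemModel).values([{
                'item_uuid': item_uuid, 'item_name': item_name,
                'created_at': now, 'updated_at': now,
            }])
            stmt = stmt.on_conflict_do_update(
                index_elements=[ItemModel.item_uuid],
                # created_at is deliberately not refreshed: it keeps the original
                # insertion timestamp, so the comparison below stays meaningful.
                set_=dict(item_name=stmt.excluded.item_name, updated_at=stmt.excluded.updated_at),
            )
            stmt = stmt.returning(ItemModel.created_at, ItemModel.updated_at)
            created_at, updated_at = session.execute(stmt).fetchone()
            return updated_at > created_at   # True => the row existed and was updated
        return run_transaction(sessionmaker(bind=db_engine), callback)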
diff --git a/src/context/service/database/ServiceModel.py b/src/context/service/database/ServiceModel.py
deleted file mode 100644
index 8b32d1cc9eeec248d1097f972df93dbd2c0882fa..0000000000000000000000000000000000000000
--- a/src/context/service/database/ServiceModel.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools, logging, operator
-from enum import Enum
-from typing import Dict, List
-from common.orm.fields.EnumeratedField import EnumeratedField
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.orm.HighLevel import get_related_objects
-from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum
-from .ConfigModel import ConfigModel
-from .ConstraintModel import ConstraintsModel
-from .ContextModel import ContextModel
-from .Tools import grpc_to_enum
-
-LOGGER = logging.getLogger(__name__)
-
-class ORM_ServiceTypeEnum(Enum):
-    UNKNOWN                   = ServiceTypeEnum.SERVICETYPE_UNKNOWN
-    L3NM                      = ServiceTypeEnum.SERVICETYPE_L3NM
-    L2NM                      = ServiceTypeEnum.SERVICETYPE_L2NM
-    TAPI_CONNECTIVITY_SERVICE = ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE
-
-grpc_to_enum__service_type = functools.partial(
-    grpc_to_enum, ServiceTypeEnum, ORM_ServiceTypeEnum)
-
-class ORM_ServiceStatusEnum(Enum):
-    UNDEFINED       = ServiceStatusEnum.SERVICESTATUS_UNDEFINED
-    PLANNED         = ServiceStatusEnum.SERVICESTATUS_PLANNED
-    ACTIVE          = ServiceStatusEnum.SERVICESTATUS_ACTIVE
-    PENDING_REMOVAL = ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL
-
-grpc_to_enum__service_status = functools.partial(
-    grpc_to_enum, ServiceStatusEnum, ORM_ServiceStatusEnum)
-
-class ServiceModel(Model):
-    pk = PrimaryKeyField()
-    context_fk = ForeignKeyField(ContextModel)
-    service_uuid = StringField(required=True, allow_empty=False)
-    service_type = EnumeratedField(ORM_ServiceTypeEnum, required=True)
-    service_constraints_fk = ForeignKeyField(ConstraintsModel)
-    service_status = EnumeratedField(ORM_ServiceStatusEnum, required=True)
-    service_config_fk = ForeignKeyField(ConfigModel)
-
-    def delete(self) -> None:
-        #pylint: disable=import-outside-toplevel
-        from .RelationModels import ServiceEndPointModel
-
-        for db_service_endpoint_pk,_ in self.references(ServiceEndPointModel):
-            ServiceEndPointModel(self.database, db_service_endpoint_pk).delete()
-
-        super().delete()
-
-        ConfigModel(self.database, self.service_config_fk).delete()
-        ConstraintsModel(self.database, self.service_constraints_fk).delete()
-
-    def dump_id(self) -> Dict:
-        context_id = ContextModel(self.database, self.context_fk).dump_id()
-        return {
-            'context_id': context_id,
-            'service_uuid': {'uuid': self.service_uuid},
-        }
-
-    def dump_endpoint_ids(self) -> List[Dict]:
-        from .RelationModels import ServiceEndPointModel # pylint: disable=import-outside-toplevel
-        db_endpoints = get_related_objects(self, ServiceEndPointModel, 'endpoint_fk')
-        return [db_endpoint.dump_id() for db_endpoint in sorted(db_endpoints, key=operator.attrgetter('pk'))]
-
-    def dump_constraints(self) -> List[Dict]:
-        return ConstraintsModel(self.database, self.service_constraints_fk).dump()
-
-    def dump_config(self) -> Dict:
-        return ConfigModel(self.database, self.service_config_fk).dump()
-
-    def dump(   # pylint: disable=arguments-differ
-            self, include_endpoint_ids=True, include_constraints=True, include_config_rules=True
-        ) -> Dict:
-        result = {
-            'service_id': self.dump_id(),
-            'service_type': self.service_type.value,
-            'service_status': {'service_status': self.service_status.value},
-        }
-        if include_endpoint_ids: result['service_endpoint_ids'] = self.dump_endpoint_ids()
-        if include_constraints: result['service_constraints'] = self.dump_constraints()
-        if include_config_rules: result.setdefault('service_config', {})['config_rules'] = self.dump_config()
-        return result
diff --git a/src/context/service/database/Slice.py b/src/context/service/database/Slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..84bfff34391ada943fc61caaa0789e8e4d8e270f
--- /dev/null
+++ b/src/context/service/database/Slice.py
@@ -0,0 +1,234 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging
+from sqlalchemy import and_
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, List, Optional, Set, Tuple
+from common.proto.context_pb2 import ContextId, Slice, SliceId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Slice import json_slice_id
+from context.service.database.ConfigRule import compose_config_rules_data, upsert_config_rules
+from context.service.database.Constraint import compose_constraints_data, upsert_constraints
+from .models.enums.SliceStatus import grpc_to_enum__slice_status
+from .models.SliceModel import SliceModel, SliceEndPointModel, SliceServiceModel, SliceSubSliceModel
+from .uuids.Context import context_get_uuid
+from .uuids.EndPoint import endpoint_get_uuid
+from .uuids.Service import service_get_uuid
+from .uuids.Slice import slice_get_uuid
+
+LOGGER = logging.getLogger(__name__)
+
+def slice_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
+    context_uuid = context_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[SliceModel] = session.query(SliceModel).filter_by(context_uuid=context_uuid).all()
+        return [obj.dump_id() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def slice_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
+    context_uuid = context_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[SliceModel] = session.query(SliceModel).filter_by(context_uuid=context_uuid).all()
+        return [obj.dump() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def slice_get(db_engine : Engine, request : SliceId) -> Dict:
+    _,slice_uuid = slice_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> Optional[Dict]:
+        obj : Optional[SliceModel] = session.query(SliceModel).filter_by(slice_uuid=slice_uuid).one_or_none()
+        return None if obj is None else obj.dump()
+    obj = run_transaction(sessionmaker(bind=db_engine), callback)
+    if obj is None:
+        context_uuid = context_get_uuid(request.context_id, allow_random=False)
+        raw_slice_uuid = '{:s}/{:s}'.format(request.context_id.context_uuid.uuid, request.slice_uuid.uuid)
+        raise NotFoundException('Slice', raw_slice_uuid, extra_details=[
+            'context_uuid generated was: {:s}'.format(context_uuid),
+            'slice_uuid generated was: {:s}'.format(slice_uuid),
+        ])
+    return obj
+
+def slice_set(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
+    raw_context_uuid = request.slice_id.context_id.context_uuid.uuid
+    raw_slice_uuid = request.slice_id.slice_uuid.uuid
+    raw_slice_name = request.name
+    slice_name = raw_slice_uuid if len(raw_slice_name) == 0 else raw_slice_name
+    context_uuid,slice_uuid = slice_get_uuid(request.slice_id, slice_name=slice_name, allow_random=True)
+
+    slice_status = grpc_to_enum__slice_status(request.slice_status.slice_status)
+
+    now = datetime.datetime.utcnow()
+
+    slice_endpoints_data : List[Dict] = list()
+    for i,endpoint_id in enumerate(request.slice_endpoint_ids):
+        endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+        if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid
+        if endpoint_context_uuid not in {raw_context_uuid, context_uuid}:
+            raise InvalidArgumentException(
+                'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
+                endpoint_context_uuid,
+                ['should be == request.slice_id.context_id.context_uuid.uuid({:s})'.format(raw_context_uuid)])
+
+        _, _, endpoint_uuid = endpoint_get_uuid(endpoint_id, allow_random=False)
+        slice_endpoints_data.append({
+            'slice_uuid'   : slice_uuid,
+            'endpoint_uuid': endpoint_uuid,
+        })
+
+    slice_services_data : List[Dict] = list()
+    for i,service_id in enumerate(request.slice_service_ids):
+        _, service_uuid = service_get_uuid(service_id, allow_random=False)
+        slice_services_data.append({
+            'slice_uuid'  : slice_uuid,
+            'service_uuid': service_uuid,
+        })
+
+    slice_subslices_data : List[Dict] = list()
+    for i,subslice_id in enumerate(request.slice_subslice_ids):
+        _, subslice_uuid = slice_get_uuid(subslice_id, allow_random=False)
+        slice_subslices_data.append({
+            'slice_uuid'   : slice_uuid,
+            'subslice_uuid': subslice_uuid,
+        })
+
+    constraints = compose_constraints_data(request.slice_constraints, now, slice_uuid=slice_uuid)
+    config_rules = compose_config_rules_data(request.slice_config.config_rules, now, slice_uuid=slice_uuid)
+
+    slice_data = [{
+        'context_uuid'      : context_uuid,
+        'slice_uuid'        : slice_uuid,
+        'slice_name'        : slice_name,
+        'slice_status'      : slice_status,
+        'slice_owner_uuid'  : request.slice_owner.owner_uuid.uuid,
+        'slice_owner_string': request.slice_owner.owner_string,
+        'created_at'        : now,
+        'updated_at'        : now,
+    }]
+
+    def callback(session : Session) -> bool:
+        stmt = insert(SliceModel).values(slice_data)
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[SliceModel.slice_uuid],
+            set_=dict(
+                slice_name         = stmt.excluded.slice_name,
+                slice_status       = stmt.excluded.slice_status,
+                updated_at         = stmt.excluded.updated_at,
+                slice_owner_uuid   = stmt.excluded.slice_owner_uuid,
+                slice_owner_string = stmt.excluded.slice_owner_string,
+            )
+        )
+        stmt = stmt.returning(SliceModel.created_at, SliceModel.updated_at)
+        created_at,updated_at = session.execute(stmt).fetchone()
+        updated = updated_at > created_at
+
+        if len(slice_endpoints_data) > 0:
+            stmt = insert(SliceEndPointModel).values(slice_endpoints_data)
+            stmt = stmt.on_conflict_do_nothing(
+                index_elements=[SliceEndPointModel.slice_uuid, SliceEndPointModel.endpoint_uuid]
+            )
+            session.execute(stmt)
+
+        if len(slice_services_data) > 0:
+            stmt = insert(SliceServiceModel).values(slice_services_data)
+            stmt = stmt.on_conflict_do_nothing(
+                index_elements=[SliceServiceModel.slice_uuid, SliceServiceModel.service_uuid]
+            )
+            session.execute(stmt)
+
+        if len(slice_subslices_data) > 0:
+            stmt = insert(SliceSubSliceModel).values(slice_subslices_data)
+            stmt = stmt.on_conflict_do_nothing(
+                index_elements=[SliceSubSliceModel.slice_uuid, SliceSubSliceModel.subslice_uuid]
+            )
+            session.execute(stmt)
+
+        constraint_updates = upsert_constraints(session, constraints, slice_uuid=slice_uuid)
+        updated = updated or any([(updated_at > created_at) for created_at,updated_at in constraint_updates])
+
+        configrule_updates = upsert_config_rules(session, config_rules, slice_uuid=slice_uuid)
+        updated = updated or any([(updated_at > created_at) for created_at,updated_at in configrule_updates])
+
+        return updated
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_slice_id(slice_uuid, json_context_id(context_uuid)),updated
+
+def slice_unset(db_engine : Engine, request : Slice) -> Tuple[Dict, bool]:
+    raw_context_uuid = request.slice_id.context_id.context_uuid.uuid
+    raw_slice_uuid = request.slice_id.slice_uuid.uuid
+    raw_slice_name = request.name
+    slice_name = raw_slice_uuid if len(raw_slice_name) == 0 else raw_slice_name
+    context_uuid,slice_uuid = slice_get_uuid(request.slice_id, slice_name=slice_name, allow_random=False)
+
+    if len(request.slice_constraints) > 0:         raise NotImplementedError('UnsetSlice: removal of constraints')
+    if len(request.slice_config.config_rules) > 0: raise NotImplementedError('UnsetSlice: removal of config rules')
+    if len(request.slice_endpoint_ids) > 0:        raise NotImplementedError('UnsetSlice: removal of endpoints')
+
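+    # NOTE: the endpoint guard above currently makes the endpoint-removal logic
+    # below unreachable; it becomes effective once that guard is lifted.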
+    slice_endpoint_uuids : Set[str] = set()
+    for i,endpoint_id in enumerate(request.slice_endpoint_ids):
+        endpoint_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+        if len(endpoint_context_uuid) == 0: endpoint_context_uuid = context_uuid
+        if endpoint_context_uuid not in {raw_context_uuid, context_uuid}:
+            raise InvalidArgumentException(
+                'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
+                endpoint_context_uuid,
+                ['should be == request.slice_id.context_id.context_uuid.uuid({:s})'.format(raw_context_uuid)])
+        slice_endpoint_uuids.add(endpoint_get_uuid(endpoint_id, allow_random=False)[2])
+
+    slice_service_uuids : Set[str] = {
+        service_get_uuid(service_id, allow_random=False)[1]
+        for service_id in request.slice_service_ids
+    }
+
+    slice_subslice_uuids : Set[str] = {
+        slice_get_uuid(subslice_id, allow_random=False)[1]
+        for subslice_id in request.slice_subslice_ids
+    }
+
+    def callback(session : Session) -> bool:
+        num_deletes = 0
+        if len(slice_service_uuids) > 0:
+            num_deletes += session.query(SliceServiceModel)\
+                .filter(and_(
+                    SliceServiceModel.slice_uuid == slice_uuid,
+                    SliceServiceModel.service_uuid.in_(slice_service_uuids)
+                )).delete()
+        if len(slice_subslice_uuids) > 0:
+            num_deletes += session.query(SliceSubSliceModel)\
+                .filter(and_(
+                    SliceSubSliceModel.slice_uuid == slice_uuid,
+                    SliceSubSliceModel.subslice_uuid.in_(slice_subslice_uuids)
+                )).delete()
+        if len(slice_endpoint_uuids) > 0:
+            num_deletes += session.query(SliceEndPointModel)\
+                .filter(and_(
+                    SliceEndPointModel.slice_uuid == slice_uuid,
+                    SliceEndPointModel.endpoint_uuid.in_(slice_endpoint_uuids)
+                )).delete()
+        return num_deletes > 0
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_slice_id(slice_uuid, json_context_id(context_uuid)),updated
+
+def slice_delete(db_engine : Engine, request : SliceId) -> Tuple[Dict, bool]:
+    context_uuid,slice_uuid = slice_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> bool:
+        num_deleted = session.query(SliceModel).filter_by(slice_uuid=slice_uuid).delete()
+        return num_deleted > 0
+    deleted = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_slice_id(slice_uuid, json_context_id(context_uuid)),deleted
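For the slice relation tables (endpoints, services, subslices), slice_set relies on ON CONFLICT DO NOTHING so re-submitting the same slice is idempotent, while slice_unset issues bulk deletes constrained by IN(...) sets. Note that these deletes must use filter with positional SQL expressions, not filter_by, whose keyword-equality interface cannot accept an and_() expression. A self-contained sketch of the unset side over a hypothetical relation table:

    # Hypothetical relation table mirroring SliceServiceModel; illustrative only.
    from typing import Set
    from sqlalchemy import Column, String, and_
    from sqlalchemy.orm import Session, declarative_base

    _Base = declarative_base()

    class GroupMemberModel(_Base):
        __tablename__ = 'group_member'
        group_uuid  = Column(String, primary_key=True)
        member_uuid = Column(String, primary_key=True)

    def group_unset_members(session : Session, group_uuid : str, member_uuids : Set[str]) -> bool:
        # Bulk delete; synchronize_session=False skips identity-map bookkeeping,
        # which is safe inside a short-lived transaction callback.
        num_deleted = session.query(GroupMemberModel)\
            .filter(and_(
                GroupMemberModel.group_uuid == group_uuid,
                GroupMemberModel.member_uuid.in_(member_uuids),
            )).delete(synchronize_session=False)
        return num_deleted > 0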
diff --git a/src/context/service/database/SliceModel.py b/src/context/service/database/SliceModel.py
deleted file mode 100644
index 74bb60b401f656fdcfec8b0466019f87a8f1b41e..0000000000000000000000000000000000000000
--- a/src/context/service/database/SliceModel.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools, logging, operator
-from enum import Enum
-from typing import Dict, List
-from common.orm.fields.EnumeratedField import EnumeratedField
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.orm.HighLevel import get_related_objects
-from common.proto.context_pb2 import SliceStatusEnum
-from .ConfigModel import ConfigModel
-from .ConstraintModel import ConstraintsModel
-from .ContextModel import ContextModel
-from .Tools import grpc_to_enum
-
-LOGGER = logging.getLogger(__name__)
-
-class ORM_SliceStatusEnum(Enum):
-    UNDEFINED = SliceStatusEnum.SLICESTATUS_UNDEFINED
-    PLANNED   = SliceStatusEnum.SLICESTATUS_PLANNED
-    INIT      = SliceStatusEnum.SLICESTATUS_INIT
-    ACTIVE    = SliceStatusEnum.SLICESTATUS_ACTIVE
-    DEINIT    = SliceStatusEnum.SLICESTATUS_DEINIT
-
-grpc_to_enum__slice_status = functools.partial(
-    grpc_to_enum, SliceStatusEnum, ORM_SliceStatusEnum)
-
-class SliceModel(Model):
-    pk = PrimaryKeyField()
-    context_fk = ForeignKeyField(ContextModel)
-    slice_uuid = StringField(required=True, allow_empty=False)
-    slice_constraints_fk = ForeignKeyField(ConstraintsModel)
-    slice_status = EnumeratedField(ORM_SliceStatusEnum, required=True)
-    slice_config_fk = ForeignKeyField(ConfigModel)
-    slice_owner_uuid = StringField(required=False, allow_empty=True)
-    slice_owner_string = StringField(required=False, allow_empty=True)
-
-    def delete(self) -> None:
-        # pylint: disable=import-outside-toplevel
-        from .RelationModels import SliceEndPointModel, SliceServiceModel, SliceSubSliceModel
-
-        for db_slice_endpoint_pk,_ in self.references(SliceEndPointModel):
-            SliceEndPointModel(self.database, db_slice_endpoint_pk).delete()
-
-        for db_slice_service_pk,_ in self.references(SliceServiceModel):
-            SliceServiceModel(self.database, db_slice_service_pk).delete()
-
-        for db_slice_subslice_pk,_ in self.references(SliceSubSliceModel):
-            SliceSubSliceModel(self.database, db_slice_subslice_pk).delete()
-
-        super().delete()
-
-        ConfigModel(self.database, self.slice_config_fk).delete()
-        ConstraintsModel(self.database, self.slice_constraints_fk).delete()
-
-    def dump_id(self) -> Dict:
-        context_id = ContextModel(self.database, self.context_fk).dump_id()
-        return {
-            'context_id': context_id,
-            'slice_uuid': {'uuid': self.slice_uuid},
-        }
-
-    def dump_endpoint_ids(self) -> List[Dict]:
-        from .RelationModels import SliceEndPointModel # pylint: disable=import-outside-toplevel
-        db_endpoints = get_related_objects(self, SliceEndPointModel, 'endpoint_fk')
-        return [db_endpoint.dump_id() for db_endpoint in sorted(db_endpoints, key=operator.attrgetter('pk'))]
-
-    def dump_constraints(self) -> List[Dict]:
-        return ConstraintsModel(self.database, self.slice_constraints_fk).dump()
-
-    def dump_config(self) -> Dict:
-        return ConfigModel(self.database, self.slice_config_fk).dump()
-
-    def dump_service_ids(self) -> List[Dict]:
-        from .RelationModels import SliceServiceModel # pylint: disable=import-outside-toplevel
-        db_services = get_related_objects(self, SliceServiceModel, 'service_fk')
-        return [db_service.dump_id() for db_service in sorted(db_services, key=operator.attrgetter('pk'))]
-
-    def dump_subslice_ids(self) -> List[Dict]:
-        from .RelationModels import SliceSubSliceModel # pylint: disable=import-outside-toplevel
-        db_subslices = get_related_objects(self, SliceSubSliceModel, 'sub_slice_fk')
-        return [
-            db_subslice.dump_id()
-            for db_subslice in sorted(db_subslices, key=operator.attrgetter('pk'))
-            if db_subslice.pk != self.pk # if I am a subslice of another slice, I would appear as a subslice of myself
-        ]
-
-    def dump(   # pylint: disable=arguments-differ
-            self, include_endpoint_ids=True, include_constraints=True, include_config_rules=True,
-            include_service_ids=True, include_subslice_ids=True
-        ) -> Dict:
-        result = {
-            'slice_id': self.dump_id(),
-            'slice_status': {'slice_status': self.slice_status.value},
-        }
-        if include_endpoint_ids: result['slice_endpoint_ids'] = self.dump_endpoint_ids()
-        if include_constraints: result['slice_constraints'] = self.dump_constraints()
-        if include_config_rules: result.setdefault('slice_config', {})['config_rules'] = self.dump_config()
-        if include_service_ids: result['slice_service_ids'] = self.dump_service_ids()
-        if include_subslice_ids: result['slice_subslice_ids'] = self.dump_subslice_ids()
-
-        if len(self.slice_owner_uuid) > 0:
-            result.setdefault('slice_owner', {}).setdefault('owner_uuid', {})['uuid'] = self.slice_owner_uuid
-
-        if len(self.slice_owner_string) > 0:
-            result.setdefault('slice_owner', {})['owner_string'] = self.slice_owner_string
-
-        return result
diff --git a/src/context/service/database/Tools.py b/src/context/service/database/Tools.py
deleted file mode 100644
index 43bb71bd90582644c67d3ca528611eae937b6460..0000000000000000000000000000000000000000
--- a/src/context/service/database/Tools.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import hashlib, re
-from enum import Enum
-from typing import Dict, List, Tuple, Union
-
-# Convenient helper function to remove dictionary items in dict/list/set comprehensions.
-
-def remove_dict_key(dictionary : Dict, key : str):
-    dictionary.pop(key, None)
-    return dictionary
-
-# Enumeration classes are redundant with gRPC classes, but gRPC does not provide a programmatic method to retrieve
-# the values it expects from strings containing the desired value symbol or its integer value, so a kind of mapping is
-# required. Besides, ORM Models expect Enum classes in EnumeratedFields; we create specific and conveniently defined
-# Enum classes to serve both purposes.
-
-def grpc_to_enum(grpc_enum_class, orm_enum_class : Enum, grpc_enum_value):
-    grpc_enum_name = grpc_enum_class.Name(grpc_enum_value)
-    grpc_enum_prefix = orm_enum_class.__name__.upper()
-    grpc_enum_prefix = re.sub(r'^ORM_(.+)$', r'\1', grpc_enum_prefix)
-    grpc_enum_prefix = re.sub(r'^(.+)ENUM$', r'\1', grpc_enum_prefix)
-    grpc_enum_prefix = grpc_enum_prefix + '_'
-    orm_enum_name = grpc_enum_name.replace(grpc_enum_prefix, '')
-    orm_enum_value = orm_enum_class._member_map_.get(orm_enum_name) # pylint: disable=protected-access
-    return orm_enum_value
-
-# For some models, it is convenient to produce a string hash for fast comparisons of existence or modification. Method
-# fast_hasher computes configurable-length (between 1 and 64 bytes) hashes and returns them in hex representation.
-
-FASTHASHER_ITEM_ACCEPTED_FORMAT = 'Union[bytes, str]'
-FASTHASHER_DATA_ACCEPTED_FORMAT = 'Union[{fmt:s}, List[{fmt:s}], Tuple[{fmt:s}]]'.format(
-    fmt=FASTHASHER_ITEM_ACCEPTED_FORMAT)
-
-def fast_hasher(data : Union[bytes, str, List[Union[bytes, str]], Tuple[Union[bytes, str]]], digest_size : int = 8):
-    hasher = hashlib.blake2b(digest_size=digest_size)
-    # Do not accept sets, dicts, or other unordered data structures since their order is arbitrary, thus producing
-    # different hashes depending on the order. Consider adding support for sets or dicts with previous sorting of
-    # items by their key.
-
-    if isinstance(data, bytes):
-        data = [data]
-    elif isinstance(data, str):
-        data = [data.encode('UTF-8')]
-    elif isinstance(data, (list, tuple)):
-        pass
-    else:
-        msg = 'data({:s}) must be {:s}, found {:s}'
-        raise TypeError(msg.format(str(data), FASTHASHER_DATA_ACCEPTED_FORMAT, str(type(data))))
-
-    for i,item in enumerate(data):
-        if isinstance(item, str):
-            item = item.encode('UTF-8')
-        elif isinstance(item, bytes):
-            pass
-        else:
-            msg = 'data[{:d}]({:s}) must be {:s}, found {:s}'
-            raise TypeError(msg.format(i, str(item), FASTHASHER_ITEM_ACCEPTED_FORMAT, str(type(item))))
-        hasher.update(item)
-    return hasher.hexdigest()
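The helpers in the removed Tools.py are not lost: per-enum partials such as grpc_to_enum__slice_status now live under models/enums/, as the imports in the new Slice.py show. The mapping trick deserves a note: the ORM enum's class name determines the prefix stripped from the gRPC member name. A self-contained sketch with toy enums; the real helper calls the protobuf EnumTypeWrapper's Name() method where this sketch uses the Enum constructor:

    # Reference sketch of the removed grpc_to_enum logic; the toy enums are
    # illustrative, not part of the codebase.
    import re
    from enum import Enum

    class ToyStatusEnum(int, Enum):      # stands in for a generated gRPC enum
        TOYSTATUS_UNDEFINED = 0
        TOYSTATUS_ACTIVE    = 1

    class ORM_ToyStatusEnum(Enum):
        UNDEFINED = ToyStatusEnum.TOYSTATUS_UNDEFINED
        ACTIVE    = ToyStatusEnum.TOYSTATUS_ACTIVE

    def grpc_to_enum(grpc_enum_class, orm_enum_class, grpc_enum_value):
        grpc_enum_name   = grpc_enum_class(grpc_enum_value).name
        grpc_enum_prefix = orm_enum_class.__name__.upper()
        grpc_enum_prefix = re.sub(r'^ORM_(.+)$', r'\1', grpc_enum_prefix)  # ORM_TOYSTATUSENUM -> TOYSTATUSENUM
        grpc_enum_prefix = re.sub(r'^(.+)ENUM$', r'\1', grpc_enum_prefix)  # TOYSTATUSENUM -> TOYSTATUS
        orm_enum_name    = grpc_enum_name.replace(grpc_enum_prefix + '_', '')
        return orm_enum_class._member_map_.get(orm_enum_name)              # pylint: disable=protected-access

    assert grpc_to_enum(ToyStatusEnum, ORM_ToyStatusEnum, 1) is ORM_ToyStatusEnum.ACTIVE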
diff --git a/src/context/service/database/Topology.py b/src/context/service/database/Topology.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcd93e6bb2500c370b5e0e9a206e7d7117507b88
--- /dev/null
+++ b/src/context/service/database/Topology.py
@@ -0,0 +1,107 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, sessionmaker
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, List, Optional, Tuple
+from common.proto.context_pb2 import ContextId, Topology, TopologyId
+from common.method_wrappers.ServiceExceptions import NotFoundException
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Topology import json_topology_id
+from .models.TopologyModel import TopologyModel
+from .uuids.Context import context_get_uuid
+from .uuids.Topology import topology_get_uuid
+
+LOGGER = logging.getLogger(__name__)
+
+def topology_list_ids(db_engine : Engine, request : ContextId) -> List[Dict]:
+    context_uuid = context_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all()
+        return [obj.dump_id() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def topology_list_objs(db_engine : Engine, request : ContextId) -> List[Dict]:
+    context_uuid = context_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[TopologyModel] = session.query(TopologyModel).filter_by(context_uuid=context_uuid).all()
+        return [obj.dump() for obj in obj_list]
+    return run_transaction(sessionmaker(bind=db_engine), callback)
+
+def topology_get(db_engine : Engine, request : TopologyId) -> Dict:
+    _,topology_uuid = topology_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> Optional[Dict]:
+        obj : Optional[TopologyModel] = session.query(TopologyModel)\
+            .filter_by(topology_uuid=topology_uuid).one_or_none()
+        return None if obj is None else obj.dump()
+    obj = run_transaction(sessionmaker(bind=db_engine), callback)
+    if obj is None:
+        context_uuid = context_get_uuid(request.context_id, allow_random=False)
+        raw_topology_uuid = '{:s}/{:s}'.format(request.context_id.context_uuid.uuid, request.topology_uuid.uuid)
+        raise NotFoundException('Topology', raw_topology_uuid, extra_details=[
+            'context_uuid generated was: {:s}'.format(context_uuid),
+            'topology_uuid generated was: {:s}'.format(topology_uuid),
+        ])
+    return obj
+
+def topology_set(db_engine : Engine, request : Topology) -> Tuple[Dict, bool]:
+    topology_name = request.name
+    if len(topology_name) == 0: topology_name = request.topology_id.topology_uuid.uuid
+    context_uuid,topology_uuid = topology_get_uuid(request.topology_id, topology_name=topology_name, allow_random=True)
+
+    # Ignore request.device_ids and request.link_ids: these fields are only populated when a topology is retrieved.
+    # Devices and links are added to a topology automatically when they are created, based on the topologies
+    # referenced by the endpoints associated with those devices and links.
+
+    if len(request.device_ids) > 0:   # pragma: no cover
+        LOGGER.warning('Items in field "device_ids" ignored. This field is used for retrieval purposes only.')
+
+    if len(request.link_ids) > 0:    # pragma: no cover
+        LOGGER.warning('Items in field "link_ids" ignored. This field is used for retrieval purposes only.')
+
+    now = datetime.datetime.utcnow()
+    topology_data = [{
+        'context_uuid' : context_uuid,
+        'topology_uuid': topology_uuid,
+        'topology_name': topology_name,
+        'created_at'   : now,
+        'updated_at'   : now,
+    }]
+
+    def callback(session : Session) -> bool:
+        stmt = insert(TopologyModel).values(topology_data)
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[TopologyModel.topology_uuid],
+            set_=dict(
+                topology_name = stmt.excluded.topology_name,
+                updated_at    = stmt.excluded.updated_at,
+            )
+        )
+        stmt = stmt.returning(TopologyModel.created_at, TopologyModel.updated_at)
+        created_at,updated_at = session.execute(stmt).fetchone()
+        return updated_at > created_at
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)),updated
+
+def topology_delete(db_engine : Engine, request : TopologyId) -> Tuple[Dict, bool]:
+    context_uuid,topology_uuid = topology_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> bool:
+        num_deleted = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid).delete()
+        return num_deleted > 0
+    deleted = run_transaction(sessionmaker(bind=db_engine), callback)
+    return json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)),deleted
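Topology.py rounds out the per-entity pattern: each new module exposes plain functions (*_list_ids, *_list_objs, *_get, *_set, *_delete) over a shared Engine, keeping the gRPC servicer a thin dispatcher. A hypothetical caller sketch; ensure_topology is illustrative and assumes a configured Engine plus the repository's generated protobuf classes:

    # Hypothetical usage sketch; assumes a configured SQLAlchemy Engine and that
    # the repository's protobuf-generated classes are importable.
    from sqlalchemy.engine import Engine
    from common.proto.context_pb2 import Topology
    from context.service.database.Topology import topology_set

    def ensure_topology(db_engine : Engine, context_uuid : str, topology_uuid : str, name : str):
        request = Topology()
        request.topology_id.context_id.context_uuid.uuid = context_uuid
        request.topology_id.topology_uuid.uuid = topology_uuid
        request.name = name
        # Returns (topology_id as a JSON dict, True if an existing row was updated).
        return topology_set(db_engine, request)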
diff --git a/src/context/service/database/TopologyModel.py b/src/context/service/database/TopologyModel.py
deleted file mode 100644
index 5909c7a2c63d05f2cbde7f0d8555e63587e96682..0000000000000000000000000000000000000000
--- a/src/context/service/database/TopologyModel.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, operator
-from typing import Dict, List
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.orm.HighLevel import get_related_objects
-from .ContextModel import ContextModel
-
-LOGGER = logging.getLogger(__name__)
-
-class TopologyModel(Model):
-    pk = PrimaryKeyField()
-    context_fk = ForeignKeyField(ContextModel)
-    topology_uuid = StringField(required=True, allow_empty=False)
-
-    def dump_id(self) -> Dict:
-        context_id = ContextModel(self.database, self.context_fk).dump_id()
-        return {
-            'context_id': context_id,
-            'topology_uuid': {'uuid': self.topology_uuid},
-        }
-
-    def dump_device_ids(self) -> List[Dict]:
-        from .RelationModels import TopologyDeviceModel # pylint: disable=import-outside-toplevel
-        db_devices = get_related_objects(self, TopologyDeviceModel, 'device_fk')
-        return [db_device.dump_id() for db_device in sorted(db_devices, key=operator.attrgetter('pk'))]
-
-    def dump_link_ids(self) -> List[Dict]:
-        from .RelationModels import TopologyLinkModel # pylint: disable=import-outside-toplevel
-        db_links = get_related_objects(self, TopologyLinkModel, 'link_fk')
-        return [db_link.dump_id() for db_link in sorted(db_links, key=operator.attrgetter('pk'))]
-
-    def dump(   # pylint: disable=arguments-differ
-            self, include_devices=True, include_links=True
-        ) -> Dict:
-        result = {'topology_id': self.dump_id()}
-        if include_devices: result['device_ids'] = self.dump_device_ids()
-        if include_links: result['link_ids'] = self.dump_link_ids()
-        return result
diff --git a/src/context/service/database/__init__.py b/src/context/service/database/__init__.py
index 70a33251242c51f49140e596b8208a19dd5245f7..9953c820575d42fa88351cc8de022d880ba96e6a 100644
--- a/src/context/service/database/__init__.py
+++ b/src/context/service/database/__init__.py
@@ -11,4 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa8b9c2d2ea899fcfdb0777bd6400a34517898d0
--- /dev/null
+++ b/src/context/service/database/models/ConfigRuleModel.py
@@ -0,0 +1,52 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, json
+from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String
+from sqlalchemy.dialects.postgresql import UUID
+from typing import Dict
+from .enums.ConfigAction import ORM_ConfigActionEnum
+from ._Base import _Base
+
+# Enum values should match name of field in ConfigRule message
+class ConfigRuleKindEnum(enum.Enum):
+    CUSTOM = 'custom'
+    ACL    = 'acl'
+
+class ConfigRuleModel(_Base):
+    __tablename__ = 'configrule'
+
+    configrule_uuid = Column(UUID(as_uuid=False), primary_key=True)
+    device_uuid     = Column(ForeignKey('device.device_uuid',   ondelete='CASCADE'), nullable=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True)
+    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True)
+    position        = Column(Integer, nullable=False)
+    kind            = Column(Enum(ConfigRuleKindEnum), nullable=False)
+    action          = Column(Enum(ORM_ConfigActionEnum), nullable=False)
+    data            = Column(String, nullable=False)
+    created_at      = Column(DateTime, nullable=False)
+    updated_at      = Column(DateTime, nullable=False)
+
+    __table_args__ = (
+        CheckConstraint(position >= 0, name='check_position_value'),
+        #UniqueConstraint('device_uuid',  'position', name='unique_per_device' ),
+        #UniqueConstraint('service_uuid', 'position', name='unique_per_service'),
+        #UniqueConstraint('slice_uuid',   'position', name='unique_per_slice'  ),
+    )
+
+    def dump(self) -> Dict:
+        return {
+            'action': self.action.value,
+            self.kind.value: json.loads(self.data),
+        }
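The comment atop ConfigRuleModel is load-bearing: because each ConfigRuleKindEnum value matches the name of the corresponding oneof field in the ConfigRule protobuf message, dump() rebuilds the message's JSON shape with no per-kind branching. A standalone replica of that logic; the toy enums and the sample rule below are made up, not taken from the repository:

    # Standalone replica of the dump() shape for illustration purposes only.
    import enum, json

    class Kind(enum.Enum):
        CUSTOM = 'custom'
        ACL    = 'acl'

    class Action(enum.Enum):
        SET    = 1
        DELETE = 2

    def dump(kind : Kind, action : Action, data : str) -> dict:
        # Mirrors ConfigRuleModel.dump(): the kind's value becomes the JSON key,
        # so 'custom' rules serialize under 'custom' and 'acl' rules under 'acl'.
        return {'action': action.value, kind.value: json.loads(data)}

    print(dump(Kind.CUSTOM, Action.SET,
               json.dumps({'resource_key': '/interface[eth0]', 'resource_value': 'up'})))
    # -> {'action': 1, 'custom': {'resource_key': '/interface[eth0]', 'resource_value': 'up'}}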
diff --git a/src/context/service/database/models/ConnectionModel.py b/src/context/service/database/models/ConnectionModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..966dcab4da419e9a0d0b24cd0340b37c5485cd91
--- /dev/null
+++ b/src/context/service/database/models/ConnectionModel.py
@@ -0,0 +1,76 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, operator
+from sqlalchemy import Column, DateTime, ForeignKey, Integer, CheckConstraint, String
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.orm import relationship
+from typing import Dict
+from ._Base import _Base
+
+LOGGER = logging.getLogger(__name__)
+
+class ConnectionModel(_Base):
+    __tablename__ = 'connection'
+
+    connection_uuid = Column(UUID(as_uuid=False), primary_key=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=False)
+    settings        = Column(String, nullable=False)
+    created_at      = Column(DateTime, nullable=False)
+    updated_at      = Column(DateTime, nullable=False)
+
+    connection_service     = relationship('ServiceModel') # back_populates='connections'
+    connection_endpoints   = relationship('ConnectionEndPointModel') # lazy='joined', back_populates='connection'
+    connection_subservices = relationship('ConnectionSubServiceModel') # lazy='joined', back_populates='connection'
+
+    def dump_id(self) -> Dict:
+        return {'connection_uuid': {'uuid': self.connection_uuid}}
+
+    def dump(self) -> Dict:
+        return {
+            'connection_id'         : self.dump_id(),
+            'service_id'            : self.connection_service.dump_id(),
+            'settings'              : json.loads(self.settings),
+            'path_hops_endpoint_ids': [
+                c_ep.endpoint.dump_id()
+                for c_ep in sorted(self.connection_endpoints, key=operator.attrgetter('position'))
+            ],
+            'sub_service_ids'       : [
+                c_ss.subservice.dump_id()
+                for c_ss in self.connection_subservices
+            ],
+        }
+
+class ConnectionEndPointModel(_Base):
+    __tablename__ = 'connection_endpoint'
+
+    connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
+    endpoint_uuid   = Column(ForeignKey('endpoint.endpoint_uuid',     ondelete='RESTRICT'), primary_key=True)
+    position        = Column(Integer, nullable=False)
+
+    connection = relationship('ConnectionModel', back_populates='connection_endpoints', lazy='joined')
+    endpoint   = relationship('EndPointModel',   lazy='joined') # back_populates='connection_endpoints'
+
+    __table_args__ = (
+        CheckConstraint(position >= 0, name='check_position_value'),
+    )
+
+class ConnectionSubServiceModel(_Base):
+    __tablename__ = 'connection_subservice'
+
+    connection_uuid = Column(ForeignKey('connection.connection_uuid', ondelete='CASCADE' ), primary_key=True)
+    subservice_uuid = Column(ForeignKey('service.service_uuid',       ondelete='RESTRICT'), primary_key=True)
+
+    connection = relationship('ConnectionModel', back_populates='connection_subservices', lazy='joined')
+    subservice = relationship('ServiceModel',    lazy='joined') # back_populates='connection_subservices'
diff --git a/src/context/service/database/models/ConstraintModel.py b/src/context/service/database/models/ConstraintModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..51fc0b91df07a2c86c5e84692b3dd5edd21fb761
--- /dev/null
+++ b/src/context/service/database/models/ConstraintModel.py
@@ -0,0 +1,399 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, json
+from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String
+from sqlalchemy.dialects.postgresql import UUID
+from typing import Dict
+from ._Base import _Base
+
+# Enum values should match name of field in Constraint message
+class ConstraintKindEnum(enum.Enum):
+    CUSTOM                        = 'custom'
+    ENDPOINT_LOCATION_REGION      = 'ep_loc_region'
+    ENDPOINT_LOCATION_GPSPOSITION = 'ep_loc_gpspos'
+    ENDPOINT_PRIORITY             = 'ep_priority'
+    SLA_AVAILABILITY              = 'sla_avail'
+
+class ConstraintModel(_Base):
+    __tablename__ = 'constraint'
+
+    constraint_uuid = Column(UUID(as_uuid=False), primary_key=True)
+    service_uuid    = Column(ForeignKey('service.service_uuid', ondelete='CASCADE'), nullable=True)
+    slice_uuid      = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE'), nullable=True)
+    position        = Column(Integer, nullable=False)
+    kind            = Column(Enum(ConstraintKindEnum), nullable=False)
+    data            = Column(String, nullable=False)
+    created_at      = Column(DateTime, nullable=False)
+    updated_at      = Column(DateTime, nullable=False)
+
+    __table_args__ = (
+        CheckConstraint(position >= 0, name='check_position_value'),
+        #UniqueConstraint('service_uuid', 'position', name='unique_per_service'),
+        #UniqueConstraint('slice_uuid',   'position', name='unique_per_slice'  ),
+    )
+
+    def dump(self) -> Dict:
+        return {self.kind.value: json.loads(self.data)}
+
+
+#import logging, operator
+#from typing import Dict, List, Optional, Tuple, Type, Union
+#from common.orm.HighLevel import get_object, get_or_create_object, update_or_create_object
+#from common.orm.backend.Tools import key_to_str
+#from common.proto.context_pb2 import Constraint
+#from common.tools.grpc.Tools import grpc_message_to_json_string
+#from .EndPointModel import EndPointModel
+#from .Tools import fast_hasher
+#from sqlalchemy import Column, ForeignKey, String, Float, CheckConstraint, Integer, Boolean, Enum
+#from sqlalchemy.dialects.postgresql import UUID
+#from context.service.database.models._Base import Base
+#import enum
+#
+#LOGGER = logging.getLogger(__name__)
+#
+#def remove_dict_key(dictionary : Dict, key : str):
+#    dictionary.pop(key, None)
+#    return dictionary
+#
+#class ConstraintsModel(Base): # pylint: disable=abstract-method
+#    __tablename__ = 'Constraints'
+#    constraints_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True)
+#
+#    @staticmethod
+#    def main_pk_name():
+#        return 'constraints_uuid'
+#
+#
+#    def dump(self, constraints) -> List[Dict]:
+#        constraints = sorted(constraints, key=operator.itemgetter('position'))
+#        return [remove_dict_key(constraint, 'position') for constraint in constraints]
+#
+#
+#class ConstraintCustomModel(Base): # pylint: disable=abstract-method
+#    __tablename__ = 'ConstraintCustom'
+#    constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True)
+#    constraint_type = Column(String, nullable=False)
+#    constraint_value = Column(String, nullable=False)
+#
+#    @staticmethod
+#    def main_pk_name():
+#        return 'constraint_uuid'
+#
+#
+#    def dump(self) -> Dict: # pylint: disable=arguments-differ
+#        return {'custom': {'constraint_type': self.constraint_type, 'constraint_value': self.constraint_value}}
+#
+#
+#Union_ConstraintEndpoint = Union[
+#    'ConstraintEndpointLocationGpsPositionModel', 'ConstraintEndpointLocationRegionModel',
+#    'ConstraintEndpointPriorityModel'
+#]
+#
+#class ConstraintEndpointLocationRegionModel(Model): # pylint: disable=abstract-method
+#    endpoint_fk = ForeignKeyField(EndPointModel)
+#    region = StringField(required=True, allow_empty=False)
+#
+#    def dump(self) -> Dict: # pylint: disable=arguments-differ
+#        json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id()
+#        return {'endpoint_location': {'endpoint_id': json_endpoint_id, 'location': {'region': self.region}}}
+#
+## def dump_endpoint_id(endpoint_constraint: Union_ConstraintEndpoint):
+##     db_endpoints_pks = list(endpoint_constraint.references(EndPointModel))
+##     num_endpoints = len(db_endpoints_pks)
+##     if num_endpoints != 1:
+##         raise Exception('Wrong number({:d}) of associated Endpoints with constraint'.format(num_endpoints))
+##     db_endpoint = EndPointModel(endpoint_constraint.database, db_endpoints_pks[0])
+##     return db_endpoint.dump_id()
+#
+#
+#class ConstraintEndpointLocationRegionModel(Base): # pylint: disable=abstract-method
+#    __tablename__ = 'ConstraintEndpointLocationRegion'
+#    constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True)
+#    endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid"))
+#    region = Column(String, nullable=False)
+#
+#    @staticmethod
+#    def main_pk_name():
+#        return 'constraint_uuid'
+#
+#    def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ
+#        return {'endpoint_location': {'endpoint_id': endpoint.dump_id(), 'region': self.region}}
+#
+#    def dump(self) -> Dict: # pylint: disable=arguments-differ
+#        gps_position = {'latitude': self.latitude, 'longitude': self.longitude}
+#        json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id()
+#        return {'endpoint_location': {'endpoint_id': json_endpoint_id, 'location': {'gps_position': gps_position}}}
+#
+#class ConstraintEndpointPriorityModel(Model): # pylint: disable=abstract-method
+#    endpoint_fk = ForeignKeyField(EndPointModel)
+#    priority = IntegerField(required=True, min_value=0)
+#
+#    def dump(self) -> Dict: # pylint: disable=arguments-differ
+#        json_endpoint_id = EndPointModel(self.database, self.endpoint_fk).dump_id()
+#        return {'endpoint_priority': {'endpoint_id': json_endpoint_id, 'priority': self.priority}}
+#
+#class ConstraintEndpointLocationGpsPositionModel(Base): # pylint: disable=abstract-method
+#    __tablename__ = 'ConstraintEndpointLocationGpsPosition'
+#    constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True)
+#    endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid"))
+#    latitude = Column(Float, CheckConstraint('latitude > -90.0 AND latitude < 90.0'), nullable=False)
+#    longitude = Column(Float, CheckConstraint('longitude > -180.0 AND longitude < 180.0'), nullable=False)
+#
+#    def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ
+#        gps_position = {'latitude': self.latitude, 'longitude': self.longitude}
+#        return {'endpoint_location': {'endpoint_id': endpoint.dump_id(), 'gps_position': gps_position}}
+#
+#
+#class ConstraintEndpointPriorityModel(Base): # pylint: disable=abstract-method
+#    __tablename__ = 'ConstraintEndpointPriority'
+#    constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True)
+#    endpoint_uuid = Column(UUID(as_uuid=False), ForeignKey("EndPoint.endpoint_uuid"))
+#    # endpoint_fk = ForeignKeyField(EndPointModel)
+#    # priority = FloatField(required=True)
+#    priority = Column(Float, nullable=False)
+#    @staticmethod
+#    def main_pk_name():
+#        return 'constraint_uuid'
+#
+#    def dump(self, endpoint) -> Dict: # pylint: disable=arguments-differ
+#        return {'endpoint_priority': {'endpoint_id': endpoint.dump_id(), 'priority': self.priority}}
+#
+#
+#class ConstraintSlaAvailabilityModel(Base): # pylint: disable=abstract-method
+#    __tablename__ = 'ConstraintSlaAvailability'
+#    constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True)
+#    # num_disjoint_paths = IntegerField(required=True, min_value=1)
+#    num_disjoint_paths = Column(Integer, CheckConstraint('num_disjoint_paths >= 1'), nullable=False)
+#    # all_active = BooleanField(required=True)
+#    all_active = Column(Boolean, nullable=False)
+#    @staticmethod
+#    def main_pk_name():
+#        return 'constraint_uuid'
+#
+#    def dump(self) -> Dict: # pylint: disable=arguments-differ
+#        return {'sla_availability': {'num_disjoint_paths': self.num_disjoint_paths, 'all_active': self.all_active}}
+#
+#Union_SpecificConstraint = Union[
+#    ConstraintCustomModel, ConstraintEndpointLocationRegionModel, ConstraintEndpointLocationGpsPositionModel,
+#    ConstraintEndpointPriorityModel, ConstraintSlaAvailabilityModel,
+#]
+#
+#class ConstraintModel(Base): # pylint: disable=abstract-method
+#    __tablename__ = 'Constraint'
+#    # pk = PrimaryKeyField()
+#    # constraints_fk = ForeignKeyField(ConstraintsModel)
+#    constraint_uuid = Column(UUID(as_uuid=False), primary_key=True, unique=True)
+#    constraints_uuid = Column(UUID(as_uuid=False), ForeignKey("Constraints.constraints_uuid"), primary_key=True)
+#    # kind = EnumeratedField(ConstraintKindEnum)
+#    kind = Column(Enum(ConstraintKindEnum, create_constraint=False, native_enum=False))
+#    # position = IntegerField(min_value=0, required=True)
+#    position = Column(Integer, CheckConstraint('position >= 0'), nullable=False)
+#    # constraint_custom_fk        = ForeignKeyField(ConstraintCustomModel, required=False)
+#    constraint_custom = Column(UUID(as_uuid=False), ForeignKey("ConstraintCustom.constraint_uuid"))
+#    # constraint_ep_loc_region_fk = ForeignKeyField(ConstraintEndpointLocationRegionModel, required=False)
+#    constraint_ep_loc_region = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointLocationRegion.constraint_uuid"))
+#    # constraint_ep_loc_gpspos_fk = ForeignKeyField(ConstraintEndpointLocationGpsPositionModel, required=False)
+#    constraint_ep_loc_gpspos = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointLocationGpsPosition.constraint_uuid"))
+#    # constraint_ep_priority_fk   = ForeignKeyField(ConstraintEndpointPriorityModel, required=False)
+#    constraint_ep_priority = Column(UUID(as_uuid=False), ForeignKey("ConstraintEndpointPriority.constraint_uuid"),)
+#    # constraint_sla_avail_fk     = ForeignKeyField(ConstraintSlaAvailabilityModel, required=False)
+#    constraint_sla_avail = Column(UUID(as_uuid=False), ForeignKey("ConstraintSlaAvailability.constraint_uuid"))
+#
+#    @staticmethod
+#    def main_pk_name():
+#        return 'constraint_uuid'
+#
+#    # def delete(self) -> None:
+#    #     field_name = 'constraint_{:s}_fk'.format(str(self.kind.value))
+#    #     specific_fk_value : Optional[ForeignKeyField] = getattr(self, field_name, None)
+#    #     if specific_fk_value is None:
+#    #         raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name))
+#    #     specific_fk_class = getattr(ConstraintModel, field_name, None)
+#    #     foreign_model_class : Model = specific_fk_class.foreign_model
+#    #     super().delete()
+#    #     get_object(self.database, foreign_model_class, str(specific_fk_value)).delete()
+#
+#    def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ
+#        field_name = 'constraint_{:s}'.format(str(self.kind.value))
+#        specific_fk_value = getattr(self, field_name, None)
+#        if specific_fk_value is None:
+#            raise Exception('Unable to find constraint key for field_name({:s})'.format(field_name))
+#        specific_fk_class = getattr(ConstraintModel, field_name, None)
+#        foreign_model_class: Base = specific_fk_class.foreign_model
+#        constraint: Union_SpecificConstraint = get_object(self.database, foreign_model_class, str(specific_fk_value))
+#        result = constraint.dump()
+#        if include_position:
+#            result['position'] = self.position
+#        return result
+#
+#Tuple_ConstraintSpecs = Tuple[Type, str, Dict, ConstraintKindEnum]
+#
+#def parse_constraint_custom(grpc_constraint) -> Tuple_ConstraintSpecs:
+#    constraint_class = ConstraintCustomModel
+#    str_constraint_id = grpc_constraint.custom.constraint_type
+#    constraint_data = {
+#        'constraint_type' : grpc_constraint.custom.constraint_type,
+#        'constraint_value': grpc_constraint.custom.constraint_value,
+#    }
+#    return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.CUSTOM
+#
+#def parse_constraint_endpoint_location(db_endpoint, grpc_constraint) -> Tuple_ConstraintSpecs:
+#    grpc_endpoint_id = grpc_constraint.endpoint_location.endpoint_id
+#    # str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id)
+#
+#    str_constraint_id = db_endpoint.endpoint_uuid
+#    constraint_data = {'endpoint_fk': db_endpoint}
+#
+#    grpc_location = grpc_constraint.endpoint_location.location
+#    location_kind = str(grpc_location.WhichOneof('location'))
+#    if location_kind == 'region':
+#        constraint_class = ConstraintEndpointLocationRegionModel
+#        constraint_data.update({'region': grpc_location.region})
+#        return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_LOCATION_REGION
+#    elif location_kind == 'gps_position':
+#        constraint_class = ConstraintEndpointLocationGpsPositionModel
+#        gps_position = grpc_location.gps_position
+#        constraint_data.update({'latitude': gps_position.latitude, 'longitude': gps_position.longitude})
+#        return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_LOCATION_GPSPOSITION
+#    else:
+#        MSG = 'Location kind {:s} in Constraint of kind endpoint_location is not implemented: {:s}'
+#        raise NotImplementedError(MSG.format(location_kind, grpc_message_to_json_string(grpc_constraint)))
+#
+#def parse_constraint_endpoint_priority(db_endpoint, grpc_constraint) -> Tuple_ConstraintSpecs:
+#    grpc_endpoint_id = grpc_constraint.endpoint_priority.endpoint_id
+#    # str_endpoint_key, db_endpoint = get_endpoint(database, grpc_endpoint_id)
+#
+#    constraint_class = ConstraintEndpointPriorityModel
+#    str_constraint_id = db_endpoint.endpoint_uuid
+#    priority = grpc_constraint.endpoint_priority.priority
+#    constraint_data = {'endpoint_fk': db_endpoint, 'priority': priority}
+#
+#    return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.ENDPOINT_PRIORITY
+#
+#def parse_constraint_sla_availability(grpc_constraint) -> Tuple_ConstraintSpecs:
+#    constraint_class = ConstraintSlaAvailabilityModel
+#    str_constraint_id = ''
+#    constraint_data = {
+#        'num_disjoint_paths' : grpc_constraint.sla_availability.num_disjoint_paths,
+#        'all_active': grpc_constraint.sla_availability.all_active,
+#    }
+#    return constraint_class, str_constraint_id, constraint_data, ConstraintKindEnum.SLA_AVAILABILITY
+#
+#CONSTRAINT_PARSERS = {
+#    'custom'            : parse_constraint_custom,
+#    'endpoint_location' : parse_constraint_endpoint_location,
+#    'endpoint_priority' : parse_constraint_endpoint_priority,
+#    'sla_availability'  : parse_constraint_sla_availability,
+#}
+#
+#Union_ConstraintModel = Union[
+#    ConstraintCustomModel, ConstraintEndpointLocationGpsPositionModel, ConstraintEndpointLocationRegionModel,
+#    ConstraintEndpointPriorityModel, ConstraintSlaAvailabilityModel
+#]
+#
+#def set_constraint(
+#    database : Database, db_constraints : ConstraintsModel, grpc_constraint : Constraint, position : int
+#) -> Tuple[Union_ConstraintModel, bool]:
+#    grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint'))
+#
+#    parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind)
+#    if parser is None:
+#        raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format(
+#            grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint)))
+#
+#    # create specific constraint
+#    constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(database, grpc_constraint)
+#    str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id]))
+#    str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':')
+#    result : Tuple[Union_ConstraintModel, bool] = update_or_create_object(
+#        database, constraint_class, str_constraint_key, constraint_data)
+#    db_specific_constraint, updated = result
+#
+#    # create generic constraint
+#    constraint_fk_field_name = 'constraint_{:s}_fk'.format(constraint_kind.value)
+#    constraint_data = {
+#        'constraints_fk': db_constraints, 'position': position, 'kind': constraint_kind,
+#        constraint_fk_field_name: db_specific_constraint
+#    }
+#    result : Tuple[ConstraintModel, bool] = update_or_create_object(
+#        database, ConstraintModel, str_constraint_key, constraint_data)
+#    db_constraint, updated = result
+#
+#    return db_constraint, updated
+#
+#def set_constraints(
+#    database : Database, db_parent_pk : str, constraints_name : str, grpc_constraints
+#) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]:
+#
+#    str_constraints_key = key_to_str([constraints_name, db_parent_pk], separator=':')
+#    result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key)
+#    db_constraints, created = result
+#
+#    db_objects = [(db_constraints, created)]
+#
+#    for position,grpc_constraint in enumerate(grpc_constraints):
+#        result : Tuple[ConstraintModel, bool] = set_constraint(
+#            database, db_constraints, grpc_constraint, position)
+#        db_constraint, updated = result
+#        db_objects.append((db_constraint, updated))
+#
+#    return db_objects
diff --git a/src/context/service/database/models/ContextModel.py b/src/context/service/database/models/ContextModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..26ccd8c60f12c841aeb43c0dfc11754bb45843b0
--- /dev/null
+++ b/src/context/service/database/models/ContextModel.py
@@ -0,0 +1,43 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from sqlalchemy import Column, DateTime, String
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.orm import relationship
+from typing import Dict
+from ._Base import _Base
+
+class ContextModel(_Base):
+    __tablename__ = 'context'
+
+    context_uuid = Column(UUID(as_uuid=False), primary_key=True)
+    context_name = Column(String, nullable=False)
+    created_at   = Column(DateTime, nullable=False)
+    updated_at   = Column(DateTime, nullable=False)
+
+    topologies = relationship('TopologyModel', back_populates='context')
+    services   = relationship('ServiceModel',  back_populates='context')
+    slices     = relationship('SliceModel',    back_populates='context')
+
+    def dump_id(self) -> Dict:
+        return {'context_uuid': {'uuid': self.context_uuid}}
+
+    def dump(self) -> Dict:
+        return {
+            'context_id'  : self.dump_id(),
+            'name'        : self.context_name,
+            'topology_ids': [obj.dump_id() for obj in self.topologies],
+            'service_ids' : [obj.dump_id() for obj in self.services  ],
+            'slice_ids'   : [obj.dump_id() for obj in self.slices    ],
+        }
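+
+# ---- Editor's sketch (hypothetical, not part of the original commit) --------
+# dump_id() yields the nested-uuid shape used across the gRPC API. On a
+# transient instance the relationship collections default to empty lists, so
+# dump() can be exercised without a Session.
+def _example_context_dump() -> None: # pragma: no cover
+    ctx = ContextModel(context_uuid='admin-uuid', context_name='admin')
+    assert ctx.dump_id() == {'context_uuid': {'uuid': 'admin-uuid'}}
+    assert ctx.dump()['topology_ids'] == []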
diff --git a/src/context/service/database/models/DeviceModel.py b/src/context/service/database/models/DeviceModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..d73cec75d70d3f12cf964258a84adfab79978082
--- /dev/null
+++ b/src/context/service/database/models/DeviceModel.py
@@ -0,0 +1,57 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import operator
+from sqlalchemy import Column, DateTime, Enum, String
+from sqlalchemy.dialects.postgresql import ARRAY, UUID
+from sqlalchemy.orm import relationship
+from typing import Dict
+from .enums.DeviceDriver import ORM_DeviceDriverEnum
+from .enums.DeviceOperationalStatus import ORM_DeviceOperationalStatusEnum
+from ._Base import _Base
+
+class DeviceModel(_Base):
+    __tablename__ = 'device'
+
+    device_uuid               = Column(UUID(as_uuid=False), primary_key=True)
+    device_name               = Column(String, nullable=False)
+    device_type               = Column(String, nullable=False)
+    device_operational_status = Column(Enum(ORM_DeviceOperationalStatusEnum), nullable=False)
+    device_drivers            = Column(ARRAY(Enum(ORM_DeviceDriverEnum), dimensions=1))
+    created_at                = Column(DateTime, nullable=False)
+    updated_at                = Column(DateTime, nullable=False)
+
+    #topology_devices = relationship('TopologyDeviceModel', back_populates='device')
+    config_rules = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='device'
+    endpoints    = relationship('EndPointModel', passive_deletes=True) # lazy='joined', back_populates='device'
+
+    def dump_id(self) -> Dict:
+        return {'device_uuid': {'uuid': self.device_uuid}}
+
+    def dump(self) -> Dict:
+        return {
+            'device_id'                : self.dump_id(),
+            'name'                     : self.device_name,
+            'device_type'              : self.device_type,
+            'device_operational_status': self.device_operational_status.value,
+            'device_drivers'           : [driver.value for driver in self.device_drivers],
+            'device_config'            : {'config_rules': [
+                config_rule.dump()
+                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+            ]},
+            'device_endpoints'         : [
+                endpoint.dump()
+                for endpoint in self.endpoints
+            ],
+        }
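+
+# ---- Editor's sketch (hypothetical, not part of the original commit) --------
+# device_drivers is a one-dimensional Postgres ARRAY of enum values; dump()
+# unwraps the operational-status enum and each driver enum via .value (the
+# underlying gRPC integer constants).
+def _example_device_dump() -> None: # pragma: no cover
+    device = DeviceModel(
+        device_uuid='dev-uuid', device_name='r1', device_type='packet-router',
+        device_operational_status=ORM_DeviceOperationalStatusEnum.ENABLED,
+        device_drivers=[ORM_DeviceDriverEnum.OPENCONFIG])
+    assert device.dump()['device_drivers'] == [ORM_DeviceDriverEnum.OPENCONFIG.value]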
diff --git a/src/context/service/database/models/EndPointModel.py b/src/context/service/database/models/EndPointModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..07a5df2bf69e154f8d969d716a2eb914145f9919
--- /dev/null
+++ b/src/context/service/database/models/EndPointModel.py
@@ -0,0 +1,53 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from sqlalchemy import Column, DateTime, Enum, ForeignKey, String
+from sqlalchemy.dialects.postgresql import ARRAY, UUID
+from sqlalchemy.orm import relationship
+from typing import Dict
+from .enums.KpiSampleType import ORM_KpiSampleTypeEnum
+from ._Base import _Base
+
+class EndPointModel(_Base):
+    __tablename__ = 'endpoint'
+
+    endpoint_uuid    = Column(UUID(as_uuid=False), primary_key=True)
+    device_uuid      = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), nullable=False)
+    topology_uuid    = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), nullable=False)
+    name             = Column(String, nullable=False)
+    endpoint_type    = Column(String, nullable=False)
+    kpi_sample_types = Column(ARRAY(Enum(ORM_KpiSampleTypeEnum), dimensions=1))
+    created_at       = Column(DateTime, nullable=False)
+    updated_at       = Column(DateTime, nullable=False)
+
+    device            = relationship('DeviceModel',          back_populates='endpoints')
+    topology          = relationship('TopologyModel')
+    #link_endpoints    = relationship('LinkEndPointModel',    back_populates='endpoint' )
+    #service_endpoints = relationship('ServiceEndPointModel', back_populates='endpoint' )
+
+    def dump_id(self) -> Dict:
+        result = {
+            'topology_id'  : self.topology.dump_id(),
+            'device_id'    : self.device.dump_id(),
+            'endpoint_uuid': {'uuid': self.endpoint_uuid},
+        }
+        return result
+
+    def dump(self) -> Dict:
+        return {
+            'endpoint_id'     : self.dump_id(),
+            'name'            : self.name,
+            'endpoint_type'   : self.endpoint_type,
+            'kpi_sample_types': [kst.value for kst in self.kpi_sample_types],
+        }
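+
+# ---- Editor's sketch (hypothetical, not part of the original commit) --------
+# dump_id() composes the parent topology and device identifiers, so both
+# relationships must be resolvable; here they are wired by hand on transient
+# objects instead of being loaded by a Session.
+def _example_endpoint_dump_id() -> None: # pragma: no cover
+    from .ContextModel import ContextModel
+    from .DeviceModel import DeviceModel
+    from .TopologyModel import TopologyModel
+    ctx  = ContextModel(context_uuid='ctx-uuid', context_name='admin')
+    topo = TopologyModel(topology_uuid='topo-uuid', topology_name='admin', context=ctx)
+    dev  = DeviceModel(device_uuid='dev-uuid', device_name='r1', device_type='router')
+    ep   = EndPointModel(
+        endpoint_uuid='ep-uuid', name='1/1', endpoint_type='copper', topology=topo, device=dev)
+    assert ep.dump_id()['device_id'] == {'device_uuid': {'uuid': 'dev-uuid'}}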
diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..abf37a28afe6db4a27e225de8e86b0d6d323b420
--- /dev/null
+++ b/src/context/service/database/models/LinkModel.py
@@ -0,0 +1,52 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from sqlalchemy import Column, DateTime, ForeignKey, String
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.orm import relationship
+from typing import Dict
+from ._Base import _Base
+
+class LinkModel(_Base):
+    __tablename__ = 'link'
+
+    link_uuid  = Column(UUID(as_uuid=False), primary_key=True)
+    link_name  = Column(String, nullable=False)
+    created_at = Column(DateTime, nullable=False)
+    updated_at = Column(DateTime, nullable=False)
+
+    #topology_links = relationship('TopologyLinkModel', back_populates='link')
+    link_endpoints = relationship('LinkEndPointModel') # lazy='joined', back_populates='link'
+
+    def dump_id(self) -> Dict:
+        return {'link_uuid': {'uuid': self.link_uuid}}
+
+    def dump(self) -> Dict:
+        return {
+            'link_id'          : self.dump_id(),
+            'name'             : self.link_name,
+            'link_endpoint_ids': [
+                link_endpoint.endpoint.dump_id()
+                for link_endpoint in self.link_endpoints
+            ],
+        }
+
+class LinkEndPointModel(_Base):
+    __tablename__ = 'link_endpoint'
+
+    link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+
+    link     = relationship('LinkModel',     back_populates='link_endpoints', lazy='joined')
+    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='link_endpoints'
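+
+# ---- Editor's sketch (hypothetical, not part of the original commit) --------
+# LinkEndPointModel is a plain association object; back_populates keeps the
+# link side of the relationship in sync when a row is wired by hand.
+def _example_link_endpoints() -> None: # pragma: no cover
+    link = LinkModel(link_uuid='link-uuid', link_name='r1==r2')
+    assert link.dump()['link_endpoint_ids'] == [] # nothing attached yet
+    link_endpoint = LinkEndPointModel(link=link)  # appends itself via back_populates
+    assert link.link_endpoints == [link_endpoint]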
diff --git a/src/context/service/database/models/PolicyRuleModel.py b/src/context/service/database/models/PolicyRuleModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ccec8dd8fe5c3e6c0a00fea691c9e380c8fddcd
--- /dev/null
+++ b/src/context/service/database/models/PolicyRuleModel.py
@@ -0,0 +1,77 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, json
+from sqlalchemy import CheckConstraint, Column, DateTime, Enum, ForeignKey, Integer, String
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.orm import relationship
+from typing import Dict
+from .enums.PolicyRuleState import ORM_PolicyRuleStateEnum
+from ._Base import _Base
+
+# Enum values should match name of field in PolicyRule message
+class PolicyRuleKindEnum(enum.Enum):
+    DEVICE  = 'device'
+    SERVICE = 'service'
+
+class PolicyRuleModel(_Base):
+    __tablename__ = 'policyrule'
+
+    policyrule_uuid          = Column(UUID(as_uuid=False), primary_key=True)
+    policyrule_kind          = Column(Enum(PolicyRuleKindEnum), nullable=False)
+    policyrule_state         = Column(Enum(ORM_PolicyRuleStateEnum), nullable=False)
+    policyrule_state_message = Column(String, nullable=False)
+    policyrule_priority      = Column(Integer, nullable=False)
+    policyrule_service_uuid  = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), nullable=True)
+    policyrule_eca_data      = Column(String, nullable=False)
+    created_at               = Column(DateTime, nullable=False)
+    updated_at               = Column(DateTime, nullable=False)
+
+    policyrule_service = relationship('ServiceModel') # back_populates='policyrules'
+    policyrule_devices = relationship('PolicyRuleDeviceModel') # back_populates='policyrule'
+
+    __table_args__ = (
+        CheckConstraint(policyrule_priority >= 0, name='check_priority_value'),
+    )
+
+    def dump_id(self) -> Dict:
+        return {'uuid': {'uuid': self.policyrule_uuid}}
+
+    def dump(self) -> Dict:
+        # Load JSON-encoded Event-Condition-Action (ECA) model data and populate with policy basic details
+        policyrule_basic = json.loads(self.policyrule_eca_data)
+        policyrule_basic.update({
+            'policyRuleId': self.dump_id(),
+            'policyRuleState': {
+                'policyRuleState': self.policyrule_state.value,
+                'policyRuleStateMessage': self.policyrule_state_message,
+            },
+            'priority': self.policyrule_priority,
+        })
+        result = {
+            'policyRuleBasic': policyrule_basic,
+            'deviceList': [{'device_uuid': {'uuid': pr_d.device_uuid}} for pr_d in self.policyrule_devices],
+        }
+        if self.policyrule_kind == PolicyRuleKindEnum.SERVICE:
+            result['serviceId'] = self.policyrule_service.dump_id()
+        return {self.policyrule_kind.value: result}
+
+class PolicyRuleDeviceModel(_Base):
+    __tablename__ = 'policyrule_device'
+
+    policyrule_uuid = Column(ForeignKey('policyrule.policyrule_uuid', ondelete='RESTRICT'), primary_key=True)
+    device_uuid     = Column(ForeignKey('device.device_uuid',         ondelete='RESTRICT'), primary_key=True)
+
+    #policyrule = relationship('PolicyRuleModel', lazy='joined') # back_populates='policyrule_devices'
+    device     = relationship('DeviceModel',     lazy='joined') # back_populates='policyrule_devices'
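+
+# ---- Editor's sketch (hypothetical, not part of the original commit) --------
+# policyrule_eca_data holds the JSON-encoded Event-Condition-Action body;
+# dump() re-parses it, overlays id/state/priority, and nests the result under
+# the rule kind ('device' or 'service').
+def _example_policyrule_dump() -> None: # pragma: no cover
+    rule = PolicyRuleModel(
+        policyrule_uuid='pr-uuid', policyrule_kind=PolicyRuleKindEnum.DEVICE,
+        policyrule_state=ORM_PolicyRuleStateEnum.INSERTED, policyrule_state_message='',
+        policyrule_priority=1, policyrule_eca_data=json.dumps({'conditionList': [], 'actionList': []}))
+    assert rule.dump()['device']['policyRuleBasic']['priority'] == 1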
diff --git a/src/context/service/database/models/ServiceModel.py b/src/context/service/database/models/ServiceModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a28dbce288860afd53f19640621e788674dbdff
--- /dev/null
+++ b/src/context/service/database/models/ServiceModel.py
@@ -0,0 +1,73 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import operator
+from sqlalchemy import Column, DateTime, Enum, ForeignKey, String
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.orm import relationship
+from typing import Dict
+from .enums.ServiceStatus import ORM_ServiceStatusEnum
+from .enums.ServiceType import ORM_ServiceTypeEnum
+from ._Base import _Base
+
+class ServiceModel(_Base):
+    __tablename__ = 'service'
+
+    service_uuid   = Column(UUID(as_uuid=False), primary_key=True)
+    context_uuid   = Column(ForeignKey('context.context_uuid'), nullable=False)
+    service_name   = Column(String, nullable=False)
+    service_type   = Column(Enum(ORM_ServiceTypeEnum), nullable=False)
+    service_status = Column(Enum(ORM_ServiceStatusEnum), nullable=False)
+    created_at     = Column(DateTime, nullable=False)
+    updated_at     = Column(DateTime, nullable=False)
+
+    context           = relationship('ContextModel', back_populates='services')
+    service_endpoints = relationship('ServiceEndPointModel') # lazy='joined', back_populates='service'
+    constraints       = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='service'
+    config_rules      = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='service'
+
+    def dump_id(self) -> Dict:
+        return {
+            'context_id': self.context.dump_id(),
+            'service_uuid': {'uuid': self.service_uuid},
+        }
+
+    def dump(self) -> Dict:
+        return {
+            'service_id'          : self.dump_id(),
+            'name'                : self.service_name,
+            'service_type'        : self.service_type.value,
+            'service_status'      : {'service_status': self.service_status.value},
+            'service_endpoint_ids': [
+                service_endpoint.endpoint.dump_id()
+                for service_endpoint in self.service_endpoints
+            ],
+            'service_constraints' : [
+                constraint.dump()
+                for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
+            ],
+            'service_config'      : {'config_rules': [
+                config_rule.dump()
+                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+            ]},
+        }
+
+class ServiceEndPointModel(_Base):
+    __tablename__ = 'service_endpoint'
+
+    service_uuid  = Column(ForeignKey('service.service_uuid',   ondelete='CASCADE' ), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+
+    service  = relationship('ServiceModel',  back_populates='service_endpoints', lazy='joined')
+    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='service_endpoints'
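+
+# ---- Editor's sketch (hypothetical, not part of the original commit) --------
+# Constraints and config rules are re-ordered by their 'position' attribute at
+# dump time, so insertion order in the collections does not matter.
+def _example_service_dump() -> None: # pragma: no cover
+    from .ContextModel import ContextModel
+    ctx = ContextModel(context_uuid='ctx-uuid', context_name='admin')
+    service = ServiceModel(
+        service_uuid='svc-uuid', service_name='svc', context=ctx,
+        service_type=ORM_ServiceTypeEnum.L3NM, service_status=ORM_ServiceStatusEnum.PLANNED)
+    assert service.dump()['service_status'] == {'service_status': ORM_ServiceStatusEnum.PLANNED.value}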
diff --git a/src/context/service/database/models/SliceModel.py b/src/context/service/database/models/SliceModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c9ebafa48c302b959bb265a35a80706da0a3975
--- /dev/null
+++ b/src/context/service/database/models/SliceModel.py
@@ -0,0 +1,102 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import operator
+from sqlalchemy import Column, DateTime, Enum, ForeignKey, String
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.orm import relationship
+from typing import Dict
+from .enums.SliceStatus import ORM_SliceStatusEnum
+from ._Base import _Base
+
+class SliceModel(_Base):
+    __tablename__ = 'slice'
+
+    slice_uuid         = Column(UUID(as_uuid=False), primary_key=True)
+    context_uuid       = Column(ForeignKey('context.context_uuid'), nullable=False)
+    slice_name         = Column(String, nullable=True)
+    slice_status       = Column(Enum(ORM_SliceStatusEnum), nullable=False)
+    slice_owner_uuid   = Column(String, nullable=True)
+    slice_owner_string = Column(String, nullable=True)
+    created_at         = Column(DateTime, nullable=False)
+    updated_at         = Column(DateTime, nullable=False)
+
+    context         = relationship('ContextModel', back_populates='slices')
+    slice_endpoints = relationship('SliceEndPointModel') # lazy='joined', back_populates='slice'
+    slice_services  = relationship('SliceServiceModel') # lazy='joined', back_populates='slice'
+    slice_subslices = relationship(
+        'SliceSubSliceModel', primaryjoin='slice.c.slice_uuid == slice_subslice.c.slice_uuid')
+    constraints     = relationship('ConstraintModel', passive_deletes=True) # lazy='joined', back_populates='slice'
+    config_rules    = relationship('ConfigRuleModel', passive_deletes=True) # lazy='joined', back_populates='slice'
+
+    def dump_id(self) -> Dict:
+        return {
+            'context_id': self.context.dump_id(),
+            'slice_uuid': {'uuid': self.slice_uuid},
+        }
+
+    def dump(self) -> Dict:
+        return {
+            'slice_id'          : self.dump_id(),
+            'name'              : self.slice_name,
+            'slice_status'      : {'slice_status': self.slice_status.value},
+            'slice_endpoint_ids': [
+                slice_endpoint.endpoint.dump_id()
+                for slice_endpoint in self.slice_endpoints
+            ],
+            'slice_constraints' : [
+                constraint.dump()
+                for constraint in sorted(self.constraints, key=operator.attrgetter('position'))
+            ],
+            'slice_config'      : {'config_rules': [
+                config_rule.dump()
+                for config_rule in sorted(self.config_rules, key=operator.attrgetter('position'))
+            ]},
+            'slice_service_ids': [
+                slice_service.service.dump_id()
+                for slice_service in self.slice_services
+            ],
+            'slice_subslice_ids': [
+                slice_subslice.subslice.dump_id()
+                for slice_subslice in self.slice_subslices
+            ],
+            'slice_owner': {
+                'owner_uuid': {'uuid': self.slice_owner_uuid},
+                'owner_string': self.slice_owner_string
+            }
+        }
+
+class SliceEndPointModel(_Base):
+    __tablename__ = 'slice_endpoint'
+
+    slice_uuid    = Column(ForeignKey('slice.slice_uuid',       ondelete='CASCADE' ), primary_key=True)
+    endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True)
+
+    slice    = relationship('SliceModel', back_populates='slice_endpoints', lazy='joined')
+    endpoint = relationship('EndPointModel', lazy='joined') # back_populates='slice_endpoints'
+
+class SliceServiceModel(_Base):
+    __tablename__ = 'slice_service'
+
+    slice_uuid   = Column(ForeignKey('slice.slice_uuid',     ondelete='CASCADE' ), primary_key=True)
+    service_uuid = Column(ForeignKey('service.service_uuid', ondelete='RESTRICT'), primary_key=True)
+
+    slice   = relationship('SliceModel', back_populates='slice_services', lazy='joined')
+    service = relationship('ServiceModel', lazy='joined') # back_populates='slice_services'
+
+class SliceSubSliceModel(_Base):
+    __tablename__ = 'slice_subslice'
+
+    slice_uuid    = Column(ForeignKey('slice.slice_uuid', ondelete='CASCADE' ), primary_key=True)
+    subslice_uuid = Column(ForeignKey('slice.slice_uuid', ondelete='RESTRICT'), primary_key=True)
+
+    # both columns reference slice.slice_uuid; SliceModel.dump() relies on this relationship
+    subslice = relationship('SliceModel', foreign_keys=[subslice_uuid], lazy='joined')
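+
+# ---- Editor's sketch (hypothetical, not part of the original commit) --------
+# slice_subslice is self-referential: the explicit primaryjoin above pins the
+# parent side to slice_uuid (the CASCADE column), while 'subslice' resolves
+# the child side through subslice_uuid.
+def _example_slice_dump_id() -> None: # pragma: no cover
+    from .ContextModel import ContextModel
+    ctx = ContextModel(context_uuid='ctx-uuid', context_name='admin')
+    parent = SliceModel(
+        slice_uuid='parent-uuid', context=ctx, slice_status=ORM_SliceStatusEnum.PLANNED)
+    assert parent.dump_id()['slice_uuid'] == {'uuid': 'parent-uuid'}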
diff --git a/src/context/service/database/models/TopologyModel.py b/src/context/service/database/models/TopologyModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..59659ecd3f8ad9f896bd7d8f07c1f65ba1d6f27d
--- /dev/null
+++ b/src/context/service/database/models/TopologyModel.py
@@ -0,0 +1,64 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from sqlalchemy import Column, DateTime, ForeignKey, String
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.orm import relationship
+from typing import Dict
+from ._Base import _Base
+
+class TopologyModel(_Base):
+    __tablename__ = 'topology'
+
+    topology_uuid = Column(UUID(as_uuid=False), primary_key=True)
+    context_uuid  = Column(ForeignKey('context.context_uuid'), nullable=False)
+    topology_name = Column(String, nullable=False)
+    created_at    = Column(DateTime, nullable=False)
+    updated_at    = Column(DateTime, nullable=False)
+
+    context          = relationship('ContextModel', back_populates='topologies')
+    topology_devices = relationship('TopologyDeviceModel') # back_populates='topology'
+    topology_links   = relationship('TopologyLinkModel'  ) # back_populates='topology'
+
+    def dump_id(self) -> Dict:
+        return {
+            'context_id': self.context.dump_id(),
+            'topology_uuid': {'uuid': self.topology_uuid},
+        }
+
+    def dump(self) -> Dict:
+        return {
+            'topology_id': self.dump_id(),
+            'name'       : self.topology_name,
+            'device_ids' : [{'device_uuid': {'uuid': td.device_uuid}} for td in self.topology_devices],
+            'link_ids'   : [{'link_uuid'  : {'uuid': tl.link_uuid  }} for tl in self.topology_links  ],
+        }
+
+class TopologyDeviceModel(_Base):
+    __tablename__ = 'topology_device'
+
+    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True)
+    device_uuid   = Column(ForeignKey('device.device_uuid',     ondelete='CASCADE' ), primary_key=True)
+
+    #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_devices'
+    device   = relationship('DeviceModel',   lazy='joined') # back_populates='topology_devices'
+
+class TopologyLinkModel(_Base):
+    __tablename__ = 'topology_link'
+
+    topology_uuid = Column(ForeignKey('topology.topology_uuid', ondelete='RESTRICT'), primary_key=True)
+    link_uuid     = Column(ForeignKey('link.link_uuid',         ondelete='CASCADE' ), primary_key=True)
+
+    #topology = relationship('TopologyModel', lazy='joined') # back_populates='topology_links'
+    link     = relationship('LinkModel',     lazy='joined') # back_populates='topology_links'
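+
+# ---- Editor's sketch (hypothetical, not part of the original commit) --------
+# dump() reads the raw FK columns of the association rows directly, avoiding a
+# joined load of every device/link just to emit their identifiers.
+def _example_topology_dump() -> None: # pragma: no cover
+    from .ContextModel import ContextModel
+    ctx  = ContextModel(context_uuid='ctx-uuid', context_name='admin')
+    topo = TopologyModel(topology_uuid='topo-uuid', topology_name='admin', context=ctx)
+    topo.topology_devices.append(TopologyDeviceModel(device_uuid='dev-uuid'))
+    assert topo.dump()['device_ids'] == [{'device_uuid': {'uuid': 'dev-uuid'}}]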
diff --git a/src/context/service/grpc_server/Constants.py b/src/context/service/database/models/_Base.py
similarity index 64%
rename from src/context/service/grpc_server/Constants.py
rename to src/context/service/database/models/_Base.py
index 9d7c886c725d22308f33dc274234ad17f595633d..49269be08f17bc6954da50e9169990d9a438eefe 100644
--- a/src/context/service/grpc_server/Constants.py
+++ b/src/context/service/database/models/_Base.py
@@ -12,14 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-TOPIC_CONNECTION = 'connection'
-TOPIC_CONTEXT    = 'context'
-TOPIC_TOPOLOGY   = 'topology'
-TOPIC_DEVICE     = 'device'
-TOPIC_LINK       = 'link'
-TOPIC_SERVICE    = 'service'
-TOPIC_SLICE      = 'slice'
+import sqlalchemy
+from sqlalchemy.orm import declarative_base
 
-TOPICS = {TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_TOPOLOGY, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE}
+_Base = declarative_base()
 
-CONSUME_TIMEOUT = 0.5 # seconds
+def rebuild_database(db_engine : sqlalchemy.engine.Engine, drop_if_exists : bool = False):
+    if drop_if_exists: _Base.metadata.drop_all(db_engine)
+    _Base.metadata.create_all(db_engine)
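+
+def _example_rebuild(db_uri : str) -> None: # pragma: no cover
+    # Editor's sketch with a hypothetical URI: create_all() is idempotent,
+    # while drop_if_exists=True wipes the schema first (handy in test fixtures).
+    engine = sqlalchemy.create_engine(db_uri)
+    rebuild_database(engine, drop_if_exists=True)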
diff --git a/src/context/service/database/models/__init__.py b/src/context/service/database/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/context/service/database/models/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/context/service/rest_server/RestServer.py b/src/context/service/database/models/enums/ConfigAction.py
similarity index 56%
rename from src/context/service/rest_server/RestServer.py
rename to src/context/service/database/models/enums/ConfigAction.py
index 289e92a3c1b74e207a261b133130a551c3c55918..6bbcdea99c02823e982d19d1ad8d12c77f17dbdb 100644
--- a/src/context/service/rest_server/RestServer.py
+++ b/src/context/service/database/models/enums/ConfigAction.py
@@ -12,12 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.Constants import ServiceNameEnum
-from common.Settings import get_service_baseurl_http, get_service_port_http
-from common.tools.service.GenericRestServer import GenericRestServer
+import enum, functools
+from common.proto.context_pb2 import ConfigActionEnum
+from ._GrpcToEnum import grpc_to_enum
 
-class RestServer(GenericRestServer):
-    def __init__(self, cls_name: str = __name__) -> None:
-        bind_port = get_service_port_http(ServiceNameEnum.CONTEXT)
-        base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-        super().__init__(bind_port, base_url, cls_name=cls_name)
+class ORM_ConfigActionEnum(enum.Enum):
+    UNDEFINED = ConfigActionEnum.CONFIGACTION_UNDEFINED
+    SET       = ConfigActionEnum.CONFIGACTION_SET
+    DELETE    = ConfigActionEnum.CONFIGACTION_DELETE
+
+grpc_to_enum__config_action = functools.partial(
+    grpc_to_enum, ConfigActionEnum, ORM_ConfigActionEnum)
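+
+# Editor's sketch (not part of the original commit): the partial maps raw gRPC
+# values to ORM members by stripping the derived 'CONFIGACTION_' prefix.
+def _example_config_action() -> None: # pragma: no cover
+    assert grpc_to_enum__config_action(ConfigActionEnum.CONFIGACTION_SET) is ORM_ConfigActionEnum.SET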
diff --git a/src/context/service/database/models/enums/DeviceDriver.py b/src/context/service/database/models/enums/DeviceDriver.py
new file mode 100644
index 0000000000000000000000000000000000000000..21338ddb8dc0de111e889a7041a65a6fa0219cfd
--- /dev/null
+++ b/src/context/service/database/models/enums/DeviceDriver.py
@@ -0,0 +1,29 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, functools
+from common.proto.context_pb2 import DeviceDriverEnum
+from ._GrpcToEnum import grpc_to_enum
+
+class ORM_DeviceDriverEnum(enum.Enum):
+    UNDEFINED             = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED
+    OPENCONFIG            = DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG
+    TRANSPORT_API         = DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API
+    P4                    = DeviceDriverEnum.DEVICEDRIVER_P4
+    IETF_NETWORK_TOPOLOGY = DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY
+    ONF_TR_352            = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352
+    XR                    = DeviceDriverEnum.DEVICEDRIVER_XR
+
+grpc_to_enum__device_driver = functools.partial(
+    grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum)
diff --git a/src/context/service/database/models/enums/DeviceOperationalStatus.py b/src/context/service/database/models/enums/DeviceOperationalStatus.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bfe60779101a7021a40c00c6ee91cf28e2304ca
--- /dev/null
+++ b/src/context/service/database/models/enums/DeviceOperationalStatus.py
@@ -0,0 +1,25 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, functools
+from common.proto.context_pb2 import DeviceOperationalStatusEnum
+from ._GrpcToEnum import grpc_to_enum
+
+class ORM_DeviceOperationalStatusEnum(enum.Enum):
+    UNDEFINED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED
+    DISABLED  = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
+    ENABLED   = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+
+grpc_to_enum__device_operational_status = functools.partial(
+    grpc_to_enum, DeviceOperationalStatusEnum, ORM_DeviceOperationalStatusEnum)
diff --git a/src/context/service/database/KpiSampleType.py b/src/context/service/database/models/enums/KpiSampleType.py
similarity index 91%
rename from src/context/service/database/KpiSampleType.py
rename to src/context/service/database/models/enums/KpiSampleType.py
index 0a2015b3fdeaceeed8b01619805f55f2a9267468..4126e90b2373a1d720b6a35e8e122be10d3d74e5 100644
--- a/src/context/service/database/KpiSampleType.py
+++ b/src/context/service/database/models/enums/KpiSampleType.py
@@ -12,12 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import functools
-from enum import Enum
+import enum, functools
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
-from .Tools import grpc_to_enum
+from ._GrpcToEnum import grpc_to_enum
 
-class ORM_KpiSampleTypeEnum(Enum):
+class ORM_KpiSampleTypeEnum(enum.Enum):
     UNKNOWN             = KpiSampleType.KPISAMPLETYPE_UNKNOWN
     PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED
     PACKETS_RECEIVED    = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
diff --git a/src/context/service/database/models/enums/PolicyRuleState.py b/src/context/service/database/models/enums/PolicyRuleState.py
new file mode 100644
index 0000000000000000000000000000000000000000..9917b181907b393a1a33a7f8dc58c1426bb8ec15
--- /dev/null
+++ b/src/context/service/database/models/enums/PolicyRuleState.py
@@ -0,0 +1,33 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, functools
+from common.proto.policy_pb2 import PolicyRuleStateEnum
+from ._GrpcToEnum import grpc_to_enum
+
+class ORM_PolicyRuleStateEnum(enum.Enum):
+    UNDEFINED   = PolicyRuleStateEnum.POLICY_UNDEFINED   # Undefined rule state
+    FAILED      = PolicyRuleStateEnum.POLICY_FAILED      # Rule failed
+    INSERTED    = PolicyRuleStateEnum.POLICY_INSERTED    # Rule is just inserted
+    VALIDATED   = PolicyRuleStateEnum.POLICY_VALIDATED   # Rule content is correct
+    PROVISIONED = PolicyRuleStateEnum.POLICY_PROVISIONED # Rule subscribed to Monitoring
+    ACTIVE      = PolicyRuleStateEnum.POLICY_ACTIVE      # Rule is currently active (alarm is just thrown by Monitoring)
+    ENFORCED    = PolicyRuleStateEnum.POLICY_ENFORCED    # Rule action is successfully enforced
+    INEFFECTIVE = PolicyRuleStateEnum.POLICY_INEFFECTIVE # The applied rule action did not work as expected
+    EFFECTIVE   = PolicyRuleStateEnum.POLICY_EFFECTIVE   # The applied rule action did work as expected
+    UPDATED     = PolicyRuleStateEnum.POLICY_UPDATED     # Operator requires a policy to change
+    REMOVED     = PolicyRuleStateEnum.POLICY_REMOVED     # Operator requires to remove a policy
+
+grpc_to_enum__policyrule_state = functools.partial(
+    grpc_to_enum, PolicyRuleStateEnum, ORM_PolicyRuleStateEnum, grpc_enum_prefix='POLICY_')
diff --git a/src/context/service/database/models/enums/ServiceStatus.py b/src/context/service/database/models/enums/ServiceStatus.py
new file mode 100644
index 0000000000000000000000000000000000000000..5afd5da8faf72f8ea1a622010ddd2e78d4f82a16
--- /dev/null
+++ b/src/context/service/database/models/enums/ServiceStatus.py
@@ -0,0 +1,26 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, functools
+from common.proto.context_pb2 import ServiceStatusEnum
+from ._GrpcToEnum import grpc_to_enum
+
+class ORM_ServiceStatusEnum(enum.Enum):
+    UNDEFINED       = ServiceStatusEnum.SERVICESTATUS_UNDEFINED
+    PLANNED         = ServiceStatusEnum.SERVICESTATUS_PLANNED
+    ACTIVE          = ServiceStatusEnum.SERVICESTATUS_ACTIVE
+    PENDING_REMOVAL = ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL
+
+grpc_to_enum__service_status = functools.partial(
+    grpc_to_enum, ServiceStatusEnum, ORM_ServiceStatusEnum)
diff --git a/src/context/service/database/models/enums/ServiceType.py b/src/context/service/database/models/enums/ServiceType.py
new file mode 100644
index 0000000000000000000000000000000000000000..e36cbc38934463b2d13d46380c6044554a19ed2a
--- /dev/null
+++ b/src/context/service/database/models/enums/ServiceType.py
@@ -0,0 +1,26 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, functools
+from common.proto.context_pb2 import ServiceTypeEnum
+from ._GrpcToEnum import grpc_to_enum
+
+class ORM_ServiceTypeEnum(enum.Enum):
+    UNKNOWN                   = ServiceTypeEnum.SERVICETYPE_UNKNOWN
+    L3NM                      = ServiceTypeEnum.SERVICETYPE_L3NM
+    L2NM                      = ServiceTypeEnum.SERVICETYPE_L2NM
+    TAPI_CONNECTIVITY_SERVICE = ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE
+
+grpc_to_enum__service_type = functools.partial(
+    grpc_to_enum, ServiceTypeEnum, ORM_ServiceTypeEnum)
diff --git a/src/context/service/database/PolicyRuleModel.py b/src/context/service/database/models/enums/SliceStatus.py
similarity index 54%
rename from src/context/service/database/PolicyRuleModel.py
rename to src/context/service/database/models/enums/SliceStatus.py
index 7c84ea940482091a5667b2f11272748c7b444b6f..440f5ba2a6f616780f07d491abf6a1689229d36d 100644
--- a/src/context/service/database/PolicyRuleModel.py
+++ b/src/context/service/database/models/enums/SliceStatus.py
@@ -12,21 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-import json
-from typing import Dict
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
+import enum, functools
+from common.proto.context_pb2 import SliceStatusEnum
+from ._GrpcToEnum import grpc_to_enum
 
-LOGGER = logging.getLogger(__name__)
+class ORM_SliceStatusEnum(enum.Enum):
+    UNDEFINED = SliceStatusEnum.SLICESTATUS_UNDEFINED
+    PLANNED   = SliceStatusEnum.SLICESTATUS_PLANNED
+    INIT      = SliceStatusEnum.SLICESTATUS_INIT
+    ACTIVE    = SliceStatusEnum.SLICESTATUS_ACTIVE
+    DEINIT    = SliceStatusEnum.SLICESTATUS_DEINIT
 
-class PolicyRuleModel(Model):
-    pk = PrimaryKeyField()
-    value = StringField(required=True, allow_empty=False)
-
-    def dump_id(self) -> Dict:
-        return {'uuid': {'uuid': self.pk}}
-
-    def dump(self) -> Dict:
-        return json.loads(self.value)
+grpc_to_enum__slice_status = functools.partial(
+    grpc_to_enum, SliceStatusEnum, ORM_SliceStatusEnum)
diff --git a/src/context/service/database/models/enums/_GrpcToEnum.py b/src/context/service/database/models/enums/_GrpcToEnum.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4fe6c1cc04f3dbbfc6f2ab467e66da6a36fb057
--- /dev/null
+++ b/src/context/service/database/models/enums/_GrpcToEnum.py
@@ -0,0 +1,38 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from enum import Enum
+from typing import Optional
+
+# Enumeration classes are redundant with gRPC classes, but gRPC does not provide a programmatic method to retrieve
+# the values it expects from strings containing the desired value symbol or its integer value, so an explicit
+# mapping is required. Besides, ORM Models expect Enum classes in EnumeratedFields; we create specific and
+# conveniently defined Enum classes to serve both purposes.
+
+def grpc_to_enum(grpc_enum_class, orm_enum_class : Enum, grpc_enum_value, grpc_enum_prefix : Optional[str] = None):
+    enum_name = grpc_enum_class.Name(grpc_enum_value)
+
+    if grpc_enum_prefix is None:
+        grpc_enum_prefix = orm_enum_class.__name__.upper()
+        #grpc_enum_prefix = re.sub(r'^ORM_(.+)$', r'\1', grpc_enum_prefix)
+        #grpc_enum_prefix = re.sub(r'^(.+)ENUM$', r'\1', grpc_enum_prefix)
+        #grpc_enum_prefix = grpc_enum_prefix + '_'
+        grpc_enum_prefix = re.sub(r'^ORM_(.+)ENUM$', r'\1_', grpc_enum_prefix)
+
+    if len(grpc_enum_prefix) > 0:
+        enum_name = enum_name.replace(grpc_enum_prefix, '')
+
+    # None is returned when the stripped name has no counterpart in the ORM enum
+    orm_enum_value = orm_enum_class._member_map_.get(enum_name) # pylint: disable=protected-access
+    return orm_enum_value
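+
+# ---- Editor's sketch (hypothetical, not part of the original commit) --------
+# The default prefix is derived from the ORM class name, e.g. ORM_SampleEnum
+# -> 'SAMPLE_', which is stripped before the member lookup.
+def _example_grpc_to_enum() -> None: # pragma: no cover
+    class ORM_SampleEnum(Enum):
+        UNDEFINED = 0
+        SET       = 1
+    class _FakeGrpcEnum: # mimics the Name() lookup of generated gRPC enums
+        @staticmethod
+        def Name(value : int) -> str:
+            return {0: 'SAMPLE_UNDEFINED', 1: 'SAMPLE_SET'}[value]
+    assert grpc_to_enum(_FakeGrpcEnum, ORM_SampleEnum, 1) is ORM_SampleEnum.SET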
diff --git a/src/context/service/database/models/enums/__init__.py b/src/context/service/database/models/enums/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/context/service/database/models/enums/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/device/service/database/Tools.py b/src/context/service/database/tools/FastHasher.py
similarity index 63%
rename from src/device/service/database/Tools.py
rename to src/context/service/database/tools/FastHasher.py
index 43bb71bd90582644c67d3ca528611eae937b6460..6632a1c794ed3be8533486614993bbe7a88650cb 100644
--- a/src/device/service/database/Tools.py
+++ b/src/context/service/database/tools/FastHasher.py
@@ -12,30 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import hashlib, re
-from enum import Enum
-from typing import Dict, List, Tuple, Union
-
-# Convenient helper function to remove dictionary items in dict/list/set comprehensions.
-
-def remove_dict_key(dictionary : Dict, key : str):
-    dictionary.pop(key, None)
-    return dictionary
-
-# Enumeration classes are redundant with gRPC classes, but gRPC does not provide a programmatical method to retrieve
-# the values it expects from strings containing the desired value symbol or its integer value, so a kind of mapping is
-# required. Besides, ORM Models expect Enum classes in EnumeratedFields; we create specific and conveniently defined
-# Enum classes to serve both purposes.
-
-def grpc_to_enum(grpc_enum_class, orm_enum_class : Enum, grpc_enum_value):
-    grpc_enum_name = grpc_enum_class.Name(grpc_enum_value)
-    grpc_enum_prefix = orm_enum_class.__name__.upper()
-    grpc_enum_prefix = re.sub(r'^ORM_(.+)$', r'\1', grpc_enum_prefix)
-    grpc_enum_prefix = re.sub(r'^(.+)ENUM$', r'\1', grpc_enum_prefix)
-    grpc_enum_prefix = grpc_enum_prefix + '_'
-    orm_enum_name = grpc_enum_name.replace(grpc_enum_prefix, '')
-    orm_enum_value = orm_enum_class._member_map_.get(orm_enum_name) # pylint: disable=protected-access
-    return orm_enum_value
+import hashlib
+from typing import List, Tuple, Union
 
 # For some models, it is convenient to produce a string hash for fast comparisons of existence or modification. Method
 # fast_hasher computes configurable length (between 1 and 64 byte) hashes and retrieves them in hex representation.
diff --git a/src/context/service/database/tools/__init__.py b/src/context/service/database/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/context/service/database/tools/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/context/service/database/uuids/Connection.py b/src/context/service/database/uuids/Connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..eea3b7214874aeea8dbd7a22147b18f2c1d6367c
--- /dev/null
+++ b/src/context/service/database/uuids/Connection.py
@@ -0,0 +1,33 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import ConnectionId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentsException
+from ._Builder import get_uuid_from_string, get_uuid_random
+
+def connection_get_uuid(
+    connection_id : ConnectionId, connection_name : str = '', allow_random : bool = False
+) -> str:
+    connection_uuid = connection_id.connection_uuid.uuid
+
+    if len(connection_uuid) > 0:
+        return get_uuid_from_string(connection_uuid)
+    if len(connection_name) > 0:
+        return get_uuid_from_string(connection_name)
+    if allow_random: return get_uuid_random()
+
+    raise InvalidArgumentsException([
+        ('connection_id.connection_uuid.uuid', connection_uuid),
+        ('name', connection_name),
+    ], extra_details=['At least one is required to produce a Connection UUID'])
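A short usage sketch of the precedence encoded above (a non-empty connection_uuid wins, then connection_name, then a random UUID only if allow_random is set), assuming the controller's src/ packages are importable; with all three unavailable, the helper raises instead of guessing:

from common.proto.context_pb2 import ConnectionId
from common.method_wrappers.ServiceExceptions import InvalidArgumentsException
from context.service.database.uuids.Connection import connection_get_uuid

connection_id = ConnectionId()   # empty message: no uuid set

# name-based resolution is deterministic (UUIDv5 within the TFS namespace)
assert connection_get_uuid(connection_id, connection_name='conn-1') == \
       connection_get_uuid(connection_id, connection_name='conn-1')

# no uuid, no name, allow_random=False: the helper refuses to invent an identity
try:
    connection_get_uuid(connection_id)
except InvalidArgumentsException:
    pass   # expected: at least one identifier is required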
diff --git a/src/context/service/database/uuids/Context.py b/src/context/service/database/uuids/Context.py
new file mode 100644
index 0000000000000000000000000000000000000000..16876d686b939112aff56614a516aa5708f22a3b
--- /dev/null
+++ b/src/context/service/database/uuids/Context.py
@@ -0,0 +1,37 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentsException
+from ._Builder import get_uuid_from_string, get_uuid_random
+
+def context_get_uuid(
+    context_id : ContextId, context_name : str = '', allow_random : bool = False, allow_default : bool = False
+) -> str:
+    context_uuid = context_id.context_uuid.uuid
+
+    if len(context_uuid) > 0:
+        return get_uuid_from_string(context_uuid)
+    if len(context_name) > 0:
+        return get_uuid_from_string(context_name)
+    if allow_default:
+        return get_uuid_from_string(DEFAULT_CONTEXT_NAME)
+    if allow_random:
+        return get_uuid_random()
+
+    raise InvalidArgumentsException([
+        ('context_id.context_uuid.uuid', context_uuid),
+        ('name', context_name),
+    ], extra_details=['At least one is required to produce a Context UUID'])
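The resolution order here is uuid, then name, then default, then random. A minimal sketch, again assuming the src/ packages are importable ('my-context' is a placeholder name):

from common.proto.context_pb2 import ContextId
from context.service.database.uuids.Context import context_get_uuid

empty_id = ContextId()

# allow_default maps an empty request to DEFAULT_CONTEXT_NAME, deterministically
assert context_get_uuid(empty_id, allow_default=True) == \
       context_get_uuid(empty_id, allow_default=True)

# an explicit name is checked before the default fallback
named_uuid = context_get_uuid(empty_id, context_name='my-context', allow_default=True)
assert named_uuid != context_get_uuid(empty_id, allow_default=True)

# allow_random alone yields a fresh UUIDv4 on every call
assert context_get_uuid(empty_id, allow_random=True) != context_get_uuid(empty_id, allow_random=True)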
diff --git a/src/context/service/database/uuids/Device.py b/src/context/service/database/uuids/Device.py
new file mode 100644
index 0000000000000000000000000000000000000000..41391c8fa58468f7533ddaec4d357b3a350e25a6
--- /dev/null
+++ b/src/context/service/database/uuids/Device.py
@@ -0,0 +1,33 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import DeviceId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentsException
+from ._Builder import get_uuid_from_string, get_uuid_random
+
+def device_get_uuid(
+    device_id : DeviceId, device_name : str = '', allow_random : bool = False
+) -> str:
+    device_uuid = device_id.device_uuid.uuid
+
+    if len(device_uuid) > 0:
+        return get_uuid_from_string(device_uuid)
+    if len(device_name) > 0:
+        return get_uuid_from_string(device_name)
+    if allow_random: return get_uuid_random()
+
+    raise InvalidArgumentsException([
+        ('device_id.device_uuid.uuid', device_uuid),
+        ('name', device_name),
+    ], extra_details=['At least one is required to produce a Device UUID'])
diff --git a/src/context/service/database/uuids/EndPoint.py b/src/context/service/database/uuids/EndPoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ceb39c4b478b30ece3c940b921bbb351583742b
--- /dev/null
+++ b/src/context/service/database/uuids/EndPoint.py
@@ -0,0 +1,41 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Tuple
+from common.proto.context_pb2 import EndPointId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentsException
+from ._Builder import get_uuid_from_string, get_uuid_random
+from .Device import device_get_uuid
+from .Topology import topology_get_uuid
+
+def endpoint_get_uuid(
+    endpoint_id : EndPointId, endpoint_name : str = '', allow_random : bool = False
+) -> Tuple[str, str, str]:
+    device_uuid = device_get_uuid(endpoint_id.device_id, allow_random=False)
+    _, topology_uuid = topology_get_uuid(endpoint_id.topology_id, allow_random=False, allow_default=True)
+    raw_endpoint_uuid = endpoint_id.endpoint_uuid.uuid
+
+    if len(raw_endpoint_uuid) > 0:
+        prefix_for_name = '{:s}/{:s}'.format(topology_uuid, device_uuid)
+        return topology_uuid, device_uuid, get_uuid_from_string(raw_endpoint_uuid, prefix_for_name=prefix_for_name)
+    if len(endpoint_name) > 0:
+        prefix_for_name = '{:s}/{:s}'.format(topology_uuid, device_uuid)
+        return topology_uuid, device_uuid, get_uuid_from_string(endpoint_name, prefix_for_name=prefix_for_name)
+    if allow_random:
+        return topology_uuid, device_uuid, get_uuid_random()
+
+    raise InvalidArgumentsException([
+        ('endpoint_id.endpoint_uuid.uuid', raw_endpoint_uuid),
+        ('name', endpoint_name),
+    ], extra_details=['At least one is required to produce an EndPoint UUID'])
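Since endpoint names are hashed under the '{topology_uuid}/{device_uuid}' prefix built above, identical endpoint names on different devices resolve to different UUIDs. A sketch of that property using only the _Builder helper (the scope strings below are made-up placeholders, not real UUIDs):

from context.service.database.uuids._Builder import get_uuid_from_string

# hypothetical scope prefixes standing in for real '{topology_uuid}/{device_uuid}' pairs
scope_device1 = 'topo-uuid/device1-uuid'
scope_device2 = 'topo-uuid/device2-uuid'

# same endpoint name, different scope: different deterministic UUIDs
assert get_uuid_from_string('eth0', prefix_for_name=scope_device1) != \
       get_uuid_from_string('eth0', prefix_for_name=scope_device2)

# same name and scope: stable UUID across calls, so lookups remain consistent
assert get_uuid_from_string('eth0', prefix_for_name=scope_device1) == \
       get_uuid_from_string('eth0', prefix_for_name=scope_device1)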
diff --git a/src/context/service/database/uuids/Link.py b/src/context/service/database/uuids/Link.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d68ed76fd806644d5e21bbd0d46c4805f130eb8
--- /dev/null
+++ b/src/context/service/database/uuids/Link.py
@@ -0,0 +1,33 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import LinkId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentsException
+from ._Builder import get_uuid_from_string, get_uuid_random
+
+def link_get_uuid(
+    link_id : LinkId, link_name : str = '', allow_random : bool = False
+) -> str:
+    link_uuid = link_id.link_uuid.uuid
+
+    if len(link_uuid) > 0:
+        return get_uuid_from_string(link_uuid)
+    if len(link_name) > 0:
+        return get_uuid_from_string(link_name)
+    if allow_random: return get_uuid_random()
+
+    raise InvalidArgumentsException([
+        ('link_id.link_uuid.uuid', link_uuid),
+        ('name', link_name),
+    ], extra_details=['At least one is required to produce a Link UUID'])
diff --git a/src/context/service/database/uuids/PolicyRule.py b/src/context/service/database/uuids/PolicyRule.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbe691a2da79597b3b6364549ab7ba8f3f721f88
--- /dev/null
+++ b/src/context/service/database/uuids/PolicyRule.py
@@ -0,0 +1,29 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.policy_pb2 import PolicyRuleId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentException
+from ._Builder import get_uuid_from_string, get_uuid_random
+
+def policyrule_get_uuid(
+    policyrule_id : PolicyRuleId, allow_random : bool = False
+) -> str:
+    policyrule_uuid = policyrule_id.uuid.uuid
+
+    if len(policyrule_uuid) > 0:
+        return get_uuid_from_string(policyrule_uuid)
+    if allow_random: return get_uuid_random()
+
+    raise InvalidArgumentException(
+        'policyrule_id.uuid.uuid', policyrule_uuid, extra_details=['Required to produce a PolicyRule UUID'])
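Unlike the other identity helpers, policy rules carry no separate name field, hence the singular InvalidArgumentException above. A brief sketch, assuming the src/ packages are importable (the rule identifier string is a made-up example):

from common.proto.policy_pb2 import PolicyRuleId
from common.method_wrappers.ServiceExceptions import InvalidArgumentException
from context.service.database.uuids.PolicyRule import policyrule_get_uuid

rule_id = PolicyRuleId()
rule_id.uuid.uuid = 'my-policy-rule'   # non-UUID strings are hashed deterministically

assert policyrule_get_uuid(rule_id) == policyrule_get_uuid(rule_id)

# an empty id with allow_random=False has nothing to fall back on
try:
    policyrule_get_uuid(PolicyRuleId())
except InvalidArgumentException:
    pass   # expected: the uuid field is required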
diff --git a/src/context/service/database/uuids/Service.py b/src/context/service/database/uuids/Service.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3d2059095de1f65efa2244cbc2614a05fea5508
--- /dev/null
+++ b/src/context/service/database/uuids/Service.py
@@ -0,0 +1,37 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Tuple
+from common.proto.context_pb2 import ServiceId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentsException
+from ._Builder import get_uuid_from_string, get_uuid_random
+from .Context import context_get_uuid
+
+def service_get_uuid(
+    service_id : ServiceId, service_name : str = '', allow_random : bool = False
+) -> Tuple[str, str]:
+    context_uuid = context_get_uuid(service_id.context_id, allow_random=False)
+    raw_service_uuid = service_id.service_uuid.uuid
+
+    if len(raw_service_uuid) > 0:
+        return context_uuid, get_uuid_from_string(raw_service_uuid, prefix_for_name=context_uuid)
+    if len(service_name) > 0:
+        return context_uuid, get_uuid_from_string(service_name, prefix_for_name=context_uuid)
+    if allow_random:
+        return context_uuid, get_uuid_random()
+
+    raise InvalidArgumentsException([
+        ('service_id.service_uuid.uuid', raw_service_uuid),
+        ('name', service_name),
+    ], extra_details=['At least one is required to produce a Service UUID'])
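service_get_uuid resolves the parent context first and returns both identifiers, so callers can unpack them in a single call. A minimal sketch, assuming importable src/ packages (context and service names are placeholders):

from common.proto.context_pb2 import ServiceId
from context.service.database.uuids.Service import service_get_uuid

service_id = ServiceId()
service_id.context_id.context_uuid.uuid = 'my-context'
service_id.service_uuid.uuid = 'my-service'

context_uuid, service_uuid = service_get_uuid(service_id)

# the service UUID is scoped by its context: the same service name under a
# different context would hash to a different UUID
assert (context_uuid, service_uuid) == service_get_uuid(service_id)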
diff --git a/src/context/service/database/uuids/Slice.py b/src/context/service/database/uuids/Slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7d1465dd33ff2610609962cc2fba2b0d8fa9cc9
--- /dev/null
+++ b/src/context/service/database/uuids/Slice.py
@@ -0,0 +1,37 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Tuple
+from common.proto.context_pb2 import SliceId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentsException
+from ._Builder import get_uuid_from_string, get_uuid_random
+from .Context import context_get_uuid
+
+def slice_get_uuid(
+    slice_id : SliceId, slice_name : str = '', allow_random : bool = False
+) -> Tuple[str, str]:
+    context_uuid = context_get_uuid(slice_id.context_id, allow_random=False)
+    raw_slice_uuid = slice_id.slice_uuid.uuid
+
+    if len(raw_slice_uuid) > 0:
+        return context_uuid, get_uuid_from_string(raw_slice_uuid, prefix_for_name=context_uuid)
+    if len(slice_name) > 0:
+        return context_uuid, get_uuid_from_string(slice_name, prefix_for_name=context_uuid)
+    if allow_random:
+        return context_uuid, get_uuid_random()
+
+    raise InvalidArgumentsException([
+        ('slice_id.slice_uuid.uuid', raw_slice_uuid),
+        ('name', slice_name),
+    ], extra_details=['At least one is required to produce a Slice UUID'])
diff --git a/src/context/service/database/uuids/Topology.py b/src/context/service/database/uuids/Topology.py
new file mode 100644
index 0000000000000000000000000000000000000000..15387c9d6e4db3d654dc276d946642845f7b03c1
--- /dev/null
+++ b/src/context/service/database/uuids/Topology.py
@@ -0,0 +1,40 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Tuple
+from common.Constants import DEFAULT_TOPOLOGY_NAME
+from common.proto.context_pb2 import TopologyId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentsException
+from ._Builder import get_uuid_from_string, get_uuid_random
+from .Context import context_get_uuid
+
+def topology_get_uuid(
+    topology_id : TopologyId, topology_name : str = '', allow_random : bool = False, allow_default : bool = False
+) -> Tuple[str, str]:
+    context_uuid = context_get_uuid(topology_id.context_id, allow_random=False, allow_default=allow_default)
+    raw_topology_uuid = topology_id.topology_uuid.uuid
+
+    if len(raw_topology_uuid) > 0:
+        return context_uuid, get_uuid_from_string(raw_topology_uuid, prefix_for_name=context_uuid)
+    if len(topology_name) > 0:
+        return context_uuid, get_uuid_from_string(topology_name, prefix_for_name=context_uuid)
+    if allow_default:
+        return context_uuid, get_uuid_from_string(DEFAULT_TOPOLOGY_NAME, prefix_for_name=context_uuid)
+    if allow_random:
+        return context_uuid, get_uuid_random()
+
+    raise InvalidArgumentsException([
+        ('topology_id.topology_uuid.uuid', raw_topology_uuid),
+        ('name', topology_name),
+    ], extra_details=['At least one is required to produce a Topology UUID'])
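topology_get_uuid mirrors the context helper but returns a (context_uuid, topology_uuid) pair; with allow_default=True, an entirely empty TopologyId resolves to the default topology of the default context. A minimal sketch, assuming importable src/ packages:

from common.proto.context_pb2 import TopologyId
from context.service.database.uuids.Topology import topology_get_uuid

empty_id = TopologyId()

# both the context and the topology fall back to their defaults, deterministically
assert topology_get_uuid(empty_id, allow_default=True) == \
       topology_get_uuid(empty_id, allow_default=True)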
diff --git a/src/context/service/database/uuids/_Builder.py b/src/context/service/database/uuids/_Builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..55384433bdf55023d97ec7153a4e74a9c99b409f
--- /dev/null
+++ b/src/context/service/database/uuids/_Builder.py
@@ -0,0 +1,44 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional, Union
+from uuid import UUID, uuid4, uuid5
+
+# Generate a UUIDv5-like identifier from the SHA-1 of "TFS" with no namespace, to be used as the NAMESPACE
+# for all the context UUIDs generated. For efficiency, the UUID is hardcoded; it was produced using the
+# following code:
+#    from hashlib import sha1
+#    from uuid import UUID
+#    hash = sha1(bytes('TFS', 'utf-8')).digest()
+#    NAMESPACE_TFS = UUID(bytes=hash[:16], version=5)
+NAMESPACE_TFS = UUID('200e3a1f-2223-534f-a100-758e29c37f40')
+
+def get_uuid_from_string(str_uuid_or_name : Union[str, UUID], prefix_for_name : Optional[str] = None) -> str:
+    # if a UUID instance is given, it is already valid; normalize it to its string form
+    if isinstance(str_uuid_or_name, UUID): return str(str_uuid_or_name)
+    if not isinstance(str_uuid_or_name, str):
+        MSG = 'Parameter({:s}) cannot be used to produce a UUID'
+        raise Exception(MSG.format(repr(str_uuid_or_name)))
+    try:
+        # try to parse as UUID
+        return str(UUID(str_uuid_or_name))
+    except ValueError:
+        # produce a UUID within TFS namespace from parameter
+        if prefix_for_name is not None:
+            str_uuid_or_name = '{:s}/{:s}'.format(prefix_for_name, str_uuid_or_name)
+        return str(uuid5(NAMESPACE_TFS, str_uuid_or_name))
+
+def get_uuid_random() -> str:
+    # Generate random UUID. No need to use namespace since "namespace + random = random".
+    return str(uuid4())
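The hardcoded NAMESPACE_TFS can be re-derived from the recipe in the comment above, and get_uuid_from_string distinguishes parseable UUIDs from arbitrary names; the snippet below checks both (it needs only the standard library and this module):

from hashlib import sha1
from uuid import UUID
from context.service.database.uuids._Builder import NAMESPACE_TFS, get_uuid_from_string

# re-derive the hardcoded namespace following the recipe in the comment above
digest = sha1(bytes('TFS', 'utf-8')).digest()
assert UUID(bytes=digest[:16], version=5) == NAMESPACE_TFS

# a parseable UUID string is normalized (lowercased) and returned as-is
assert get_uuid_from_string('200E3A1F-2223-534F-A100-758E29C37F40') == str(NAMESPACE_TFS)

# any other string is hashed deterministically into the TFS namespace
assert get_uuid_from_string('some-name') == get_uuid_from_string('some-name')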
diff --git a/src/context/service/database/uuids/__init__.py b/src/context/service/database/uuids/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/context/service/database/uuids/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py
deleted file mode 100644
index b7130c7000663791b162bc15d5046d80ed71463d..0000000000000000000000000000000000000000
--- a/src/context/service/grpc_server/ContextServiceServicerImpl.py
+++ /dev/null
@@ -1,861 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import grpc, json, logging, operator, threading
-from typing import Iterator, List, Set, Tuple
-from common.message_broker.MessageBroker import MessageBroker
-from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
-from common.method_wrappers.ServiceExceptions import InvalidArgumentException
-from common.orm.Database import Database
-from common.orm.HighLevel import (
-    get_all_objects, get_object, get_or_create_object, get_related_objects, update_or_create_object)
-from common.orm.backend.Tools import key_to_str
-from common.proto.context_pb2 import (
-    Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
-    Context, ContextEvent, ContextId, ContextIdList, ContextList,
-    Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
-    Empty, EventTypeEnum,
-    Link, LinkEvent, LinkId, LinkIdList, LinkList,
-    Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
-    Slice, SliceEvent, SliceId, SliceIdList, SliceList,
-    Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
-from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule)
-from common.proto.context_pb2_grpc import ContextServiceServicer
-from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer
-from common.tools.grpc.Tools import grpc_message_to_json
-from context.service.database.ConfigModel import update_config
-from context.service.database.ConnectionModel import ConnectionModel, set_path
-from context.service.database.ConstraintModel import set_constraints
-from context.service.database.ContextModel import ContextModel
-from context.service.database.PolicyRuleModel import PolicyRuleModel
-from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers
-from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types
-from context.service.database.Events import notify_event
-from context.service.database.LinkModel import LinkModel
-from context.service.database.RelationModels import (
-    ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel,
-    SliceSubSliceModel, TopologyDeviceModel, TopologyLinkModel)
-from context.service.database.ServiceModel import (
-    ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type)
-from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status
-from context.service.database.TopologyModel import TopologyModel
-from .Constants import (
-    CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE,
-    TOPIC_TOPOLOGY)
-
-LOGGER = logging.getLogger(__name__)
-
-METRICS_POOL = MetricsPool('Context', 'RPC')
-
-class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer):
-    def __init__(self, database : Database, messagebroker : MessageBroker):
-        LOGGER.debug('Creating Servicer...')
-        self.lock = threading.Lock()
-        self.database = database
-        self.messagebroker = messagebroker
-        LOGGER.debug('Servicer Created')
-
-
-    # ----- Context ----------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList:
-        with self.lock:
-            db_contexts : List[ContextModel] = get_all_objects(self.database, ContextModel)
-            db_contexts = sorted(db_contexts, key=operator.attrgetter('pk'))
-            return ContextIdList(context_ids=[db_context.dump_id() for db_context in db_contexts])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList:
-        with self.lock:
-            db_contexts : List[ContextModel] = get_all_objects(self.database, ContextModel)
-            db_contexts = sorted(db_contexts, key=operator.attrgetter('pk'))
-            return ContextList(contexts=[db_context.dump() for db_context in db_contexts])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context:
-        with self.lock:
-            context_uuid = request.context_uuid.uuid
-            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-            return Context(**db_context.dump(include_services=True, include_topologies=True))
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId:
-        with self.lock:
-            context_uuid = request.context_id.context_uuid.uuid
-
-            for i,topology_id in enumerate(request.topology_ids):
-                topology_context_uuid = topology_id.context_id.context_uuid.uuid
-                if topology_context_uuid != context_uuid:
-                    raise InvalidArgumentException(
-                        'request.topology_ids[{:d}].context_id.context_uuid.uuid'.format(i), topology_context_uuid,
-                        ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)])
-
-            for i,service_id in enumerate(request.service_ids):
-                service_context_uuid = service_id.context_id.context_uuid.uuid
-                if service_context_uuid != context_uuid:
-                    raise InvalidArgumentException(
-                        'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid,
-                        ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)])
-
-            result : Tuple[ContextModel, bool] = update_or_create_object(
-                self.database, ContextModel, context_uuid, {'context_uuid': context_uuid})
-            db_context, updated = result
-
-            for i,topology_id in enumerate(request.topology_ids):
-                topology_context_uuid = topology_id.context_id.context_uuid.uuid
-                topology_uuid = topology_id.topology_uuid.uuid
-                get_object(self.database, TopologyModel, [context_uuid, topology_uuid]) # just to confirm it exists
-
-            for i,service_id in enumerate(request.service_ids):
-                service_context_uuid = service_id.context_id.context_uuid.uuid
-                service_uuid = service_id.service_uuid.uuid
-                get_object(self.database, ServiceModel, [context_uuid, service_uuid]) # just to confirm it exists
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_context_id = db_context.dump_id()
-            notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': dict_context_id})
-            return ContextId(**dict_context_id)
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty:
-        with self.lock:
-            context_uuid = request.context_uuid.uuid
-            db_context = ContextModel(self.database, context_uuid, auto_load=False)
-            found = db_context.load()
-            if not found: return Empty()
-
-            dict_context_id = db_context.dump_id()
-            db_context.delete()
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': dict_context_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]:
-        for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT):
-            yield ContextEvent(**json.loads(message.content))
-
-
-    # ----- Topology ---------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList:
-        with self.lock:
-            context_uuid = request.context_uuid.uuid
-            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-            db_topologies : Set[TopologyModel] = get_related_objects(db_context, TopologyModel)
-            db_topologies = sorted(db_topologies, key=operator.attrgetter('pk'))
-            return TopologyIdList(topology_ids=[db_topology.dump_id() for db_topology in db_topologies])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList:
-        with self.lock:
-            context_uuid = request.context_uuid.uuid
-            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-            db_topologies : Set[TopologyModel] = get_related_objects(db_context, TopologyModel)
-            db_topologies = sorted(db_topologies, key=operator.attrgetter('pk'))
-            return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology:
-        with self.lock:
-            str_key = key_to_str([request.context_id.context_uuid.uuid, request.topology_uuid.uuid])
-            db_topology : TopologyModel = get_object(self.database, TopologyModel, str_key)
-            return Topology(**db_topology.dump(include_devices=True, include_links=True))
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId:
-        with self.lock:
-            context_uuid = request.topology_id.context_id.context_uuid.uuid
-            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-
-            topology_uuid = request.topology_id.topology_uuid.uuid
-            str_topology_key = key_to_str([context_uuid, topology_uuid])
-            result : Tuple[TopologyModel, bool] = update_or_create_object(
-                self.database, TopologyModel, str_topology_key, {
-                    'context_fk': db_context, 'topology_uuid': topology_uuid})
-            db_topology,updated = result
-
-            for device_id in request.device_ids:
-                device_uuid = device_id.device_uuid.uuid
-                db_device = get_object(self.database, DeviceModel, device_uuid)
-                str_topology_device_key = key_to_str([str_topology_key, device_uuid], separator='--')
-                result : Tuple[TopologyDeviceModel, bool] = update_or_create_object(
-                    self.database, TopologyDeviceModel, str_topology_device_key,
-                    {'topology_fk': db_topology, 'device_fk': db_device})
-                #db_topology_device,topology_device_updated = result
-
-            for link_id in request.link_ids:
-                link_uuid = link_id.link_uuid.uuid
-                db_link = get_object(self.database, LinkModel, link_uuid)
-
-                str_topology_link_key = key_to_str([str_topology_key, link_uuid], separator='--')
-                result : Tuple[TopologyLinkModel, bool] = update_or_create_object(
-                    self.database, TopologyLinkModel, str_topology_link_key,
-                    {'topology_fk': db_topology, 'link_fk': db_link})
-                #db_topology_link,topology_link_updated = result
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_topology_id = db_topology.dump_id()
-            notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id})
-            return TopologyId(**dict_topology_id)
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty:
-        with self.lock:
-            context_uuid = request.context_id.context_uuid.uuid
-            topology_uuid = request.topology_uuid.uuid
-            db_topology = TopologyModel(self.database, key_to_str([context_uuid, topology_uuid]), auto_load=False)
-            found = db_topology.load()
-            if not found: return Empty()
-
-            dict_topology_id = db_topology.dump_id()
-            db_topology.delete()
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]:
-        for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT):
-            yield TopologyEvent(**json.loads(message.content))
-
-
-    # ----- Device -----------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList:
-        with self.lock:
-            db_devices : List[DeviceModel] = get_all_objects(self.database, DeviceModel)
-            db_devices = sorted(db_devices, key=operator.attrgetter('pk'))
-            return DeviceIdList(device_ids=[db_device.dump_id() for db_device in db_devices])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList:
-        with self.lock:
-            db_devices : List[DeviceModel] = get_all_objects(self.database, DeviceModel)
-            db_devices = sorted(db_devices, key=operator.attrgetter('pk'))
-            return DeviceList(devices=[db_device.dump() for db_device in db_devices])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device:
-        with self.lock:
-            device_uuid = request.device_uuid.uuid
-            db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid)
-            return Device(**db_device.dump(
-                include_config_rules=True, include_drivers=True, include_endpoints=True))
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId:
-        with self.lock:
-            device_uuid = request.device_id.device_uuid.uuid
-
-            for i,endpoint in enumerate(request.device_endpoints):
-                endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
-                if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid
-                if device_uuid != endpoint_device_uuid:
-                    raise InvalidArgumentException(
-                        'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid,
-                        ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)])
-
-            running_config_rules = update_config(
-                self.database, device_uuid, 'device', request.device_config.config_rules)
-            db_running_config = running_config_rules[0][0]
-
-            result : Tuple[DeviceModel, bool] = update_or_create_object(self.database, DeviceModel, device_uuid, {
-                'device_uuid'              : device_uuid,
-                'device_type'              : request.device_type,
-                'device_operational_status': grpc_to_enum__device_operational_status(request.device_operational_status),
-                'device_config_fk'         : db_running_config,
-            })
-            db_device, updated = result
-
-            set_drivers(self.database, db_device, request.device_drivers)
-
-            for i,endpoint in enumerate(request.device_endpoints):
-                endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
-                endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
-                if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid
-
-                str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
-                endpoint_attributes = {
-                    'device_fk'    : db_device,
-                    'endpoint_uuid': endpoint_uuid,
-                    'endpoint_type': endpoint.endpoint_type,
-                }
-
-                endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid
-                endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid
-                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                    str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-                    db_topology : TopologyModel = get_object(self.database, TopologyModel, str_topology_key)
-
-                    str_topology_device_key = key_to_str([str_topology_key, device_uuid], separator='--')
-                    result : Tuple[TopologyDeviceModel, bool] = get_or_create_object(
-                        self.database, TopologyDeviceModel, str_topology_device_key, {
-                            'topology_fk': db_topology, 'device_fk': db_device})
-                    #db_topology_device, topology_device_created = result
-
-                    str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-                    endpoint_attributes['topology_fk'] = db_topology
-
-                result : Tuple[EndPointModel, bool] = update_or_create_object(
-                    self.database, EndPointModel, str_endpoint_key, endpoint_attributes)
-                db_endpoint, endpoint_updated = result # pylint: disable=unused-variable
-
-                set_kpi_sample_types(self.database, db_endpoint, endpoint.kpi_sample_types)
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_device_id = db_device.dump_id()
-            notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id})
-            return DeviceId(**dict_device_id)
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty:
-        with self.lock:
-            device_uuid = request.device_uuid.uuid
-            db_device = DeviceModel(self.database, device_uuid, auto_load=False)
-            found = db_device.load()
-            if not found: return Empty()
-
-            dict_device_id = db_device.dump_id()
-            db_device.delete()
-
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
-        for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT):
-            yield DeviceEvent(**json.loads(message.content))
-
-
-    # ----- Link -------------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList:
-        with self.lock:
-            db_links : List[LinkModel] = get_all_objects(self.database, LinkModel)
-            db_links = sorted(db_links, key=operator.attrgetter('pk'))
-            return LinkIdList(link_ids=[db_link.dump_id() for db_link in db_links])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList:
-        with self.lock:
-            db_links : List[LinkModel] = get_all_objects(self.database, LinkModel)
-            db_links = sorted(db_links, key=operator.attrgetter('pk'))
-            return LinkList(links=[db_link.dump() for db_link in db_links])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link:
-        with self.lock:
-            link_uuid = request.link_uuid.uuid
-            db_link : LinkModel = get_object(self.database, LinkModel, link_uuid)
-            return Link(**db_link.dump())
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def SetLink(self, request: Link, context : grpc.ServicerContext) -> LinkId:
-        with self.lock:
-            link_uuid = request.link_id.link_uuid.uuid
-            result : Tuple[LinkModel, bool] = update_or_create_object(
-                self.database, LinkModel, link_uuid, {'link_uuid': link_uuid})
-            db_link, updated = result
-
-            for endpoint_id in request.link_endpoint_ids:
-                endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
-                endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
-                endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-
-                str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid])
-
-                db_topology = None
-                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                    str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-                    db_topology : TopologyModel = get_object(self.database, TopologyModel, str_topology_key)
-                    str_topology_device_key = key_to_str([str_topology_key, endpoint_device_uuid], separator='--')
-                    # check device is in topology
-                    get_object(self.database, TopologyDeviceModel, str_topology_device_key)
-                    str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-
-                db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key)
-
-                str_link_endpoint_key = key_to_str([link_uuid, endpoint_device_uuid], separator='--')
-                result : Tuple[LinkEndPointModel, bool] = get_or_create_object(
-                    self.database, LinkEndPointModel, str_link_endpoint_key, {
-                        'link_fk': db_link, 'endpoint_fk': db_endpoint})
-                #db_link_endpoint, link_endpoint_created = result
-
-                if db_topology is not None:
-                    str_topology_link_key = key_to_str([str_topology_key, link_uuid], separator='--')
-                    result : Tuple[TopologyLinkModel, bool] = get_or_create_object(
-                        self.database, TopologyLinkModel, str_topology_link_key, {
-                            'topology_fk': db_topology, 'link_fk': db_link})
-                    #db_topology_link, topology_link_created = result
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_link_id = db_link.dump_id()
-            notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id})
-            return LinkId(**dict_link_id)
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty:
-        with self.lock:
-            link_uuid = request.link_uuid.uuid
-            db_link = LinkModel(self.database, link_uuid, auto_load=False)
-            found = db_link.load()
-            if not found: return Empty()
-
-            dict_link_id = db_link.dump_id()
-            db_link.delete()
-
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]:
-        for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT):
-            yield LinkEvent(**json.loads(message.content))
-
-
-    # ----- Service ----------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList:
-        with self.lock:
-            db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid)
-            db_services : Set[ServiceModel] = get_related_objects(db_context, ServiceModel)
-            db_services = sorted(db_services, key=operator.attrgetter('pk'))
-            return ServiceIdList(service_ids=[db_service.dump_id() for db_service in db_services])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList:
-        with self.lock:
-            db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid)
-            db_services : Set[ServiceModel] = get_related_objects(db_context, ServiceModel)
-            db_services = sorted(db_services, key=operator.attrgetter('pk'))
-            return ServiceList(services=[db_service.dump() for db_service in db_services])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service:
-        with self.lock:
-            str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid])
-            db_service : ServiceModel = get_object(self.database, ServiceModel, str_key)
-            return Service(**db_service.dump(
-                include_endpoint_ids=True, include_constraints=True, include_config_rules=True))
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId:
-        with self.lock:
-            context_uuid = request.service_id.context_id.context_uuid.uuid
-            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-
-            for i,endpoint_id in enumerate(request.service_endpoint_ids):
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-                if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid:
-                    raise InvalidArgumentException(
-                        'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
-                        endpoint_topology_context_uuid,
-                        ['should be == {:s}({:s})'.format(
-                            'request.service_id.context_id.context_uuid.uuid', context_uuid)])
-
-            service_uuid = request.service_id.service_uuid.uuid
-            str_service_key = key_to_str([context_uuid, service_uuid])
-
-            constraints_result = set_constraints(
-                self.database, str_service_key, 'service', request.service_constraints)
-            db_constraints = constraints_result[0][0]
-
-            running_config_rules = update_config(
-                self.database, str_service_key, 'service', request.service_config.config_rules)
-            db_running_config = running_config_rules[0][0]
-
-            result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, {
-                'context_fk'            : db_context,
-                'service_uuid'          : service_uuid,
-                'service_type'          : grpc_to_enum__service_type(request.service_type),
-                'service_constraints_fk': db_constraints,
-                'service_status'        : grpc_to_enum__service_status(request.service_status.service_status),
-                'service_config_fk'     : db_running_config,
-            })
-            db_service, updated = result
-
-            for i,endpoint_id in enumerate(request.service_endpoint_ids):
-                endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
-                endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
-                endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-
-                str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid])
-                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                    str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-                    str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-
-                db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key)
-
-                str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--')
-                result : Tuple[ServiceEndPointModel, bool] = get_or_create_object(
-                    self.database, ServiceEndPointModel, str_service_endpoint_key, {
-                        'service_fk': db_service, 'endpoint_fk': db_endpoint})
-                #db_service_endpoint, service_endpoint_created = result
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_service_id = db_service.dump_id()
-            notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id})
-            return ServiceId(**dict_service_id)
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty:
-        with self.lock:
-            context_uuid = request.context_id.context_uuid.uuid
-            service_uuid = request.service_uuid.uuid
-            db_service = ServiceModel(self.database, key_to_str([context_uuid, service_uuid]), auto_load=False)
-            found = db_service.load()
-            if not found: return Empty()
-
-            dict_service_id = db_service.dump_id()
-            db_service.delete()
-
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
-        for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT):
-            yield ServiceEvent(**json.loads(message.content))
-
-
-    # ----- Slice ----------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListSliceIds(self, request: ContextId, context : grpc.ServicerContext) -> SliceIdList:
-        with self.lock:
-            db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid)
-            db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel)
-            db_slices = sorted(db_slices, key=operator.attrgetter('pk'))
-            return SliceIdList(slice_ids=[db_slice.dump_id() for db_slice in db_slices])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListSlices(self, request: ContextId, context : grpc.ServicerContext) -> SliceList:
-        with self.lock:
-            db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid)
-            db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel)
-            db_slices = sorted(db_slices, key=operator.attrgetter('pk'))
-            return SliceList(slices=[db_slice.dump() for db_slice in db_slices])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetSlice(self, request: SliceId, context : grpc.ServicerContext) -> Slice:
-        with self.lock:
-            str_key = key_to_str([request.context_id.context_uuid.uuid, request.slice_uuid.uuid])
-            db_slice : SliceModel = get_object(self.database, SliceModel, str_key)
-            return Slice(**db_slice.dump(
-                include_endpoint_ids=True, include_constraints=True, include_config_rules=True,
-                include_service_ids=True, include_subslice_ids=True))
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId:
-        with self.lock:
-            context_uuid = request.slice_id.context_id.context_uuid.uuid
-            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-
-            for i,endpoint_id in enumerate(request.slice_endpoint_ids):
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-                if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid:
-                    raise InvalidArgumentException(
-                        'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
-                        endpoint_topology_context_uuid,
-                        ['should be == {:s}({:s})'.format(
-                            'request.slice_id.context_id.context_uuid.uuid', context_uuid)])
-
-            slice_uuid = request.slice_id.slice_uuid.uuid
-            str_slice_key = key_to_str([context_uuid, slice_uuid])
-
-            constraints_result = set_constraints(
-                self.database, str_slice_key, 'slice', request.slice_constraints)
-            db_constraints = constraints_result[0][0]
-
-            running_config_rules = update_config(
-                self.database, str_slice_key, 'slice', request.slice_config.config_rules)
-            db_running_config = running_config_rules[0][0]
-
-            result : Tuple[SliceModel, bool] = update_or_create_object(self.database, SliceModel, str_slice_key, {
-                'context_fk'          : db_context,
-                'slice_uuid'          : slice_uuid,
-                'slice_constraints_fk': db_constraints,
-                'slice_status'        : grpc_to_enum__slice_status(request.slice_status.slice_status),
-                'slice_config_fk'     : db_running_config,
-                'slice_owner_uuid'    : request.slice_owner.owner_uuid.uuid,
-                'slice_owner_string'  : request.slice_owner.owner_string,
-            })
-            db_slice, updated = result
-
-            for i,endpoint_id in enumerate(request.slice_endpoint_ids):
-                endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
-                endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
-                endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-
-                str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid])
-                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                    str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-                    str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-
-                db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key)
-
-                str_slice_endpoint_key = key_to_str([str_slice_key, str_endpoint_key], separator='--')
-                result : Tuple[SliceEndPointModel, bool] = get_or_create_object(
-                    self.database, SliceEndPointModel, str_slice_endpoint_key, {
-                        'slice_fk': db_slice, 'endpoint_fk': db_endpoint})
-                #db_slice_endpoint, slice_endpoint_created = result
-
-            for service_id in request.slice_service_ids:
-                service_uuid         = service_id.service_uuid.uuid
-                service_context_uuid = service_id.context_id.context_uuid.uuid
-                str_service_key = key_to_str([service_context_uuid, service_uuid])
-                db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key)
-
-                str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--')
-                result : Tuple[SliceServiceModel, bool] = get_or_create_object(
-                    self.database, SliceServiceModel, str_slice_service_key, {
-                        'slice_fk': db_slice, 'service_fk': db_service})
-                #db_slice_service, slice_service_created = result
-
-            for subslice_id in request.slice_subslice_ids:
-                subslice_uuid         = subslice_id.slice_uuid.uuid
-                subslice_context_uuid = subslice_id.context_id.context_uuid.uuid
-                str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid])
-                db_subslice : SliceModel = get_object(self.database, SliceModel, str_subslice_key)
-
-                str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--')
-                result : Tuple[SliceSubSliceModel, bool] = get_or_create_object(
-                    self.database, SliceSubSliceModel, str_slice_subslice_key, {
-                        'slice_fk': db_slice, 'sub_slice_fk': db_subslice})
-                #db_slice_subslice, slice_subslice_created = result
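-
-            # Key layout sketch (assumption: key_to_str joins fields with '/' by default;
-            # only the ':' and '--' separators are passed explicitly above):
-            #   str_slice_key          -> '<context_uuid>/<slice_uuid>'
-            #   str_slice_service_key  -> '<context_uuid>/<slice_uuid>--<context_uuid>/<service_uuid>'
-            #   str_slice_subslice_key -> '<context_uuid>/<slice_uuid>--<context_uuid>/<subslice_uuid>'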
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_slice_id = db_slice.dump_id()
-            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id})
-            return SliceId(**dict_slice_id)
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def UnsetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId:
-        with self.lock:
-            context_uuid = request.slice_id.context_id.context_uuid.uuid
-            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-
-            for i,endpoint_id in enumerate(request.slice_endpoint_ids):
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-                if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid:
-                    raise InvalidArgumentException(
-                        'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
-                        endpoint_topology_context_uuid,
-                        ['should be == {:s}({:s})'.format(
-                            'request.slice_id.context_id.context_uuid.uuid', context_uuid)])
-
-            slice_uuid = request.slice_id.slice_uuid.uuid
-            str_slice_key = key_to_str([context_uuid, slice_uuid])
-
-            if len(request.slice_constraints) > 0:
-                raise NotImplementedError('UnsetSlice: removal of constraints')
-            if len(request.slice_config.config_rules) > 0:
-                raise NotImplementedError('UnsetSlice: removal of config rules')
-            if len(request.slice_endpoint_ids) > 0:
-                raise NotImplementedError('UnsetSlice: removal of endpoints')
-
-            updated = False
-
-            for service_id in request.slice_service_ids:
-                service_uuid         = service_id.service_uuid.uuid
-                service_context_uuid = service_id.context_id.context_uuid.uuid
-                str_service_key = key_to_str([service_context_uuid, service_uuid])
-                str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--')
-                SliceServiceModel(self.database, str_slice_service_key).delete()
-                updated = True
-
-            for subslice_id in request.slice_subslice_ids:
-                subslice_uuid         = subslice_id.slice_uuid.uuid
-                subslice_context_uuid = subslice_id.context_id.context_uuid.uuid
-                str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid])
-                str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--')
-                SliceSubSliceModel(self.database, str_slice_subslice_key).delete()
-                updated = True
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            db_slice : SliceModel = get_object(self.database, SliceModel, str_slice_key)
-            dict_slice_id = db_slice.dump_id()
-            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id})
-            return SliceId(**dict_slice_id)
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty:
-        with self.lock:
-            context_uuid = request.context_id.context_uuid.uuid
-            slice_uuid = request.slice_uuid.uuid
-            db_slice = SliceModel(self.database, key_to_str([context_uuid, slice_uuid]), auto_load=False)
-            found = db_slice.load()
-            if not found: return Empty()
-
-            dict_slice_id = db_slice.dump_id()
-            db_slice.delete()
-
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]:
-        for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT):
-            yield SliceEvent(**json.loads(message.content))
-
-
-    # ----- Connection -------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListConnectionIds(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionIdList:
-        with self.lock:
-            str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid])
-            db_service : ServiceModel = get_object(self.database, ServiceModel, str_key)
-            db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel)
-            db_connections = sorted(db_connections, key=operator.attrgetter('pk'))
-            return ConnectionIdList(connection_ids=[db_connection.dump_id() for db_connection in db_connections])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListConnections(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionList:
-        with self.lock:
-            str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid])
-            db_service : ServiceModel = get_object(self.database, ServiceModel, str_key)
-            db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel)
-            db_connections = sorted(db_connections, key=operator.attrgetter('pk'))
-            return ConnectionList(connections=[db_connection.dump() for db_connection in db_connections])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Connection:
-        with self.lock:
-            db_connection : ConnectionModel = get_object(self.database, ConnectionModel, request.connection_uuid.uuid)
-            return Connection(**db_connection.dump(include_path=True, include_sub_service_ids=True))
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId:
-        with self.lock:
-            connection_uuid = request.connection_id.connection_uuid.uuid
-
-            connection_attributes = {'connection_uuid': connection_uuid}
-
-            service_context_uuid = request.service_id.context_id.context_uuid.uuid
-            service_uuid = request.service_id.service_uuid.uuid
-            if len(service_context_uuid) > 0 and len(service_uuid) > 0:
-                str_service_key = key_to_str([service_context_uuid, service_uuid])
-                db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key)
-                connection_attributes['service_fk'] = db_service
-
-            path_hops_result = set_path(self.database, connection_uuid, request.path_hops_endpoint_ids, path_name = '')
-            db_path = path_hops_result[0]
-            connection_attributes['path_fk'] = db_path
-
-            result : Tuple[ConnectionModel, bool] = update_or_create_object(
-                self.database, ConnectionModel, connection_uuid, connection_attributes)
-            db_connection, updated = result
-
-            for sub_service_id in request.sub_service_ids:
-                sub_service_uuid         = sub_service_id.service_uuid.uuid
-                sub_service_context_uuid = sub_service_id.context_id.context_uuid.uuid
-                str_sub_service_key = key_to_str([sub_service_context_uuid, sub_service_uuid])
-                db_service : ServiceModel = get_object(self.database, ServiceModel, str_sub_service_key)
-
-                str_connection_sub_service_key = key_to_str([connection_uuid, str_sub_service_key], separator='--')
-                result : Tuple[ConnectionSubServiceModel, bool] = get_or_create_object(
-                    self.database, ConnectionSubServiceModel, str_connection_sub_service_key, {
-                        'connection_fk': db_connection, 'sub_service_fk': db_service})
-                #db_connection_sub_service, connection_sub_service_created = result
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_connection_id = db_connection.dump_id()
-            notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id})
-            return ConnectionId(**dict_connection_id)
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty:
-        with self.lock:
-            db_connection = ConnectionModel(self.database, request.connection_uuid.uuid, auto_load=False)
-            found = db_connection.load()
-            if not found: return Empty()
-
-            dict_connection_id = db_connection.dump_id()
-            db_connection.delete()
-
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]:
-        for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT):
-            yield ConnectionEvent(**json.loads(message.content))
-
-
-    # ----- Policy -----------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListPolicyRuleIds(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleIdList:
-        with self.lock:
-            db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel)
-            db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk'))
-            return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def ListPolicyRules(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleList:
-        with self.lock:
-            db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel)
-            db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk'))
-            return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules])
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetPolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule:
-        with self.lock:
-            policy_rule_uuid = request.uuid.uuid
-            db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid)
-            return PolicyRule(**db_policy_rule.dump())
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def SetPolicyRule(self, request: PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId:
-        with self.lock:
-            policy_rule_type = request.WhichOneof('policy_rule')
-            policy_rule_json = grpc_message_to_json(request)
-            policy_rule_uuid = policy_rule_json[policy_rule_type]['policyRuleBasic']['policyRuleId']['uuid']['uuid']
-            result: Tuple[PolicyRuleModel, bool] = update_or_create_object(
-                self.database, PolicyRuleModel, policy_rule_uuid, {'value': json.dumps(policy_rule_json)})
-            db_policy, updated = result # pylint: disable=unused-variable
-
-            #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_policy_id = db_policy.dump_id()
-            #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id})
-            return PolicyRuleId(**dict_policy_id)
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def RemovePolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> Empty:
-        with self.lock:
-            policy_uuid = request.uuid.uuid
-            db_policy = PolicyRuleModel(self.database, policy_uuid, auto_load=False)
-            found = db_policy.load()
-            if not found: return Empty()
-
-            dict_policy_id = db_policy.dump_id()
-            db_policy.delete()
-            #event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id})
-            return Empty()
diff --git a/src/context/service/rest_server/Resources.py b/src/context/service/rest_server/Resources.py
deleted file mode 100644
index 5f03132a34004388596ce1fdfac470f029c093ea..0000000000000000000000000000000000000000
--- a/src/context/service/rest_server/Resources.py
+++ /dev/null
@@ -1,246 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from flask import make_response
-from flask.json import jsonify
-from flask_restful import Resource
-from common.orm.Database import Database
-from common.proto.context_pb2 import ConnectionId, ContextId, DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId
-from common.proto.policy_pb2 import PolicyRuleId
-from common.tools.grpc.Tools import grpc_message_to_json
-from context.service.grpc_server.ContextServiceServicerImpl import ContextServiceServicerImpl
-
-def format_grpc_to_json(grpc_reply):
-    return jsonify(grpc_message_to_json(grpc_reply))
-
-def grpc_connection_id(connection_uuid):
-    return ConnectionId(**{
-        'connection_uuid': {'uuid': connection_uuid}
-    })
-
-def grpc_context_id(context_uuid):
-    return ContextId(**{
-        'context_uuid': {'uuid': context_uuid}
-    })
-
-def grpc_device_id(device_uuid):
-    return DeviceId(**{
-        'device_uuid': {'uuid': device_uuid}
-    })
-
-def grpc_link_id(link_uuid):
-    return LinkId(**{
-        'link_uuid': {'uuid': link_uuid}
-    })
-
-def grpc_service_id(context_uuid, service_uuid):
-    return ServiceId(**{
-        'context_id': {'context_uuid': {'uuid': context_uuid}},
-        'service_uuid': {'uuid': service_uuid}
-    })
-
-def grpc_slice_id(context_uuid, slice_uuid):
-    return SliceId(**{
-        'context_id': {'context_uuid': {'uuid': context_uuid}},
-        'slice_uuid': {'uuid': slice_uuid}
-    })
-
-def grpc_topology_id(context_uuid, topology_uuid):
-    return TopologyId(**{
-        'context_id': {'context_uuid': {'uuid': context_uuid}},
-        'topology_uuid': {'uuid': topology_uuid}
-    })
-
-def grpc_policy_rule_id(policy_rule_uuid):
-    return PolicyRuleId(**{
-        'uuid': {'uuid': policy_rule_uuid}
-    })
-
-class _Resource(Resource):
-    def __init__(self, database : Database) -> None:
-        super().__init__()
-        self.database = database
-        self.servicer = ContextServiceServicerImpl(self.database, None)
-
-class ContextIds(_Resource):
-    def get(self):
-        return format_grpc_to_json(self.servicer.ListContextIds(Empty(), None))
-
-class Contexts(_Resource):
-    def get(self):
-        return format_grpc_to_json(self.servicer.ListContexts(Empty(), None))
-
-class Context(_Resource):
-    def get(self, context_uuid : str):
-        return format_grpc_to_json(self.servicer.GetContext(grpc_context_id(context_uuid), None))
-
-class TopologyIds(_Resource):
-    def get(self, context_uuid : str):
-        return format_grpc_to_json(self.servicer.ListTopologyIds(grpc_context_id(context_uuid), None))
-
-class Topologies(_Resource):
-    def get(self, context_uuid : str):
-        return format_grpc_to_json(self.servicer.ListTopologies(grpc_context_id(context_uuid), None))
-
-class Topology(_Resource):
-    def get(self, context_uuid : str, topology_uuid : str):
-        return format_grpc_to_json(self.servicer.GetTopology(grpc_topology_id(context_uuid, topology_uuid), None))
-
-class ServiceIds(_Resource):
-    def get(self, context_uuid : str):
-        return format_grpc_to_json(self.servicer.ListServiceIds(grpc_context_id(context_uuid), None))
-
-class Services(_Resource):
-    def get(self, context_uuid : str):
-        return format_grpc_to_json(self.servicer.ListServices(grpc_context_id(context_uuid), None))
-
-class Service(_Resource):
-    def get(self, context_uuid : str, service_uuid : str):
-        return format_grpc_to_json(self.servicer.GetService(grpc_service_id(context_uuid, service_uuid), None))
-
-class SliceIds(_Resource):
-    def get(self, context_uuid : str):
-        return format_grpc_to_json(self.servicer.ListSliceIds(grpc_context_id(context_uuid), None))
-
-class Slices(_Resource):
-    def get(self, context_uuid : str):
-        return format_grpc_to_json(self.servicer.ListSlices(grpc_context_id(context_uuid), None))
-
-class Slice(_Resource):
-    def get(self, context_uuid : str, slice_uuid : str):
-        return format_grpc_to_json(self.servicer.GetSlice(grpc_slice_id(context_uuid, slice_uuid), None))
-
-class DeviceIds(_Resource):
-    def get(self):
-        return format_grpc_to_json(self.servicer.ListDeviceIds(Empty(), None))
-
-class Devices(_Resource):
-    def get(self):
-        return format_grpc_to_json(self.servicer.ListDevices(Empty(), None))
-
-class Device(_Resource):
-    def get(self, device_uuid : str):
-        return format_grpc_to_json(self.servicer.GetDevice(grpc_device_id(device_uuid), None))
-
-class LinkIds(_Resource):
-    def get(self):
-        return format_grpc_to_json(self.servicer.ListLinkIds(Empty(), None))
-
-class Links(_Resource):
-    def get(self):
-        return format_grpc_to_json(self.servicer.ListLinks(Empty(), None))
-
-class Link(_Resource):
-    def get(self, link_uuid : str):
-        return format_grpc_to_json(self.servicer.GetLink(grpc_link_id(link_uuid), None))
-
-class ConnectionIds(_Resource):
-    def get(self, context_uuid : str, service_uuid : str):
-        return format_grpc_to_json(self.servicer.ListConnectionIds(grpc_service_id(context_uuid, service_uuid), None))
-
-class Connections(_Resource):
-    def get(self, context_uuid : str, service_uuid : str):
-        return format_grpc_to_json(self.servicer.ListConnections(grpc_service_id(context_uuid, service_uuid), None))
-
-class Connection(_Resource):
-    def get(self, connection_uuid : str):
-        return format_grpc_to_json(self.servicer.GetConnection(grpc_connection_id(connection_uuid), None))
-
-class PolicyRuleIds(_Resource):
-    def get(self):
-        return format_grpc_to_json(self.servicer.ListPolicyRuleIds(Empty(), None))
-
-class PolicyRules(_Resource):
-    def get(self):
-        return format_grpc_to_json(self.servicer.ListPolicyRules(Empty(), None))
-
-class PolicyRule(_Resource):
-    def get(self, policy_rule_uuid : str):
-        return format_grpc_to_json(self.servicer.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid), None))
-
-class DumpText(Resource):
-    def __init__(self, database : Database) -> None:
-        super().__init__()
-        self.database = database
-
-    def get(self):
-        db_entries = self.database.dump()
-        num_entries = len(db_entries)
-        response = ['----- Database Dump [{:3d} entries] -------------------------'.format(num_entries)]
-        for db_entry in db_entries:
-            response.append('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-        response.append('-----------------------------------------------------------')
-        headers = {'Content-Type': 'text/plain'}
-        return make_response('\n'.join(response), 200, headers)
-
-class DumpHtml(Resource):
-    def __init__(self, database : Database) -> None:
-        super().__init__()
-        self.database = database
-
-    def get(self):
-        db_entries = self.database.dump()
-        num_entries = len(db_entries)
-        response = []
-        response.append('<HTML><HEAD><TITLE>Database Dump [{:3d} entries]</TITLE></HEAD><BODY>'.format(num_entries))
-        response.append('<H3>Database Dump [{:3d} entries]</H3><HR/>'.format(num_entries))
-        response.append('<TABLE border=1>')
-        response.append('<TR><TH>Type</TH><TH>Key</TH><TH>Value</TH></TR>')
-        for db_entry in db_entries:
-            response.append('<TR><TD>{:s}</TD><TD>{:s}</TD><TD>{:s}</TD></TR>'.format(*db_entry))
-        response.append('</TABLE></BODY></HTML>')
-
-        headers = {'Content-Type': 'text/html'}
-        return make_response(''.join(response), 200, headers)
-
-
-# Use the 'path' converter in Service and Link because service_uuid and link_uuid might contain the char '/', which
-# Flask's default 'string' converter does not match (see the sketch after this list).
-RESOURCES = [
-    # (endpoint_name, resource_class, resource_url)
-    ('api.context_ids',    ContextIds,    '/context_ids'),
-    ('api.contexts',       Contexts,      '/contexts'),
-    ('api.context',        Context,       '/context/<string:context_uuid>'),
-
-    ('api.topology_ids',   TopologyIds,   '/context/<string:context_uuid>/topology_ids'),
-    ('api.topologies',     Topologies,    '/context/<string:context_uuid>/topologies'),
-    ('api.topology',       Topology,      '/context/<string:context_uuid>/topology/<string:topology_uuid>'),
-
-    ('api.service_ids',    ServiceIds,    '/context/<string:context_uuid>/service_ids'),
-    ('api.services',       Services,      '/context/<string:context_uuid>/services'),
-    ('api.service',        Service,       '/context/<string:context_uuid>/service/<path:service_uuid>'),
-
-    ('api.slice_ids',      SliceIds,      '/context/<string:context_uuid>/slice_ids'),
-    ('api.slices',         Slices,        '/context/<string:context_uuid>/slices'),
-    ('api.slice',          Slice,         '/context/<string:context_uuid>/slice/<path:slice_uuid>'),
-
-    ('api.device_ids',     DeviceIds,     '/device_ids'),
-    ('api.devices',        Devices,       '/devices'),
-    ('api.device',         Device,        '/device/<string:device_uuid>'),
-
-    ('api.link_ids',       LinkIds,       '/link_ids'),
-    ('api.links',          Links,         '/links'),
-    ('api.link',           Link,          '/link/<path:link_uuid>'),
-
-    ('api.connection_ids', ConnectionIds, '/context/<string:context_uuid>/service/<path:service_uuid>/connection_ids'),
-    ('api.connections',    Connections,   '/context/<string:context_uuid>/service/<path:service_uuid>/connections'),
-    ('api.connection',     Connection,    '/connection/<path:connection_uuid>'),
-
-    ('api.policyrule_ids', PolicyRuleIds, '/policyrule_ids'),
-    ('api.policyrules',    PolicyRules,   '/policyrules'),
-    ('api.policyrule',     PolicyRule,    '/policyrule/<string:policyrule_uuid>'),
-
-    ('api.dump.text',      DumpText,      '/dump/text'),
-    ('api.dump.html',      DumpHtml,      '/dump/html'),
-]
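-
-# Illustrative sketch (hypothetical routes, not part of this API): Werkzeug's 'string' converter
-# matches any text except '/', while 'path' also accepts '/':
-#
-#   from flask import Flask
-#   app = Flask(__name__)
-#
-#   @app.route('/demo/<string:uuid>')
-#   def demo_string(uuid): return uuid    # 404 for 'R1/EP2-R2/EP1'
-#
-#   @app.route('/demo-path/<path:uuid>')
-#   def demo_path(uuid): return uuid      # matches 'R1/EP2-R2/EP1'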
diff --git a/src/context/tests/Constants.py b/src/context/tests/Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..b29584a7b743db8c4cc75dbe9418b42142797fed
--- /dev/null
+++ b/src/context/tests/Constants.py
@@ -0,0 +1,15 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+GET_EVENTS_TIMEOUT = 60.0
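+
+# Usage sketch: upper bound for tests blocking on the EventsCollector, e.g.:
+#   events = events_collector.get_events(block=True, count=8, timeout=GET_EVENTS_TIMEOUT)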
diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py
index 1cf929cfa578e8bbf8f95885cc2a7bc7e7b9f3ef..19d53619cd34812c45312bde10d830287246f732 100644
--- a/src/context/tests/Objects.py
+++ b/src/context/tests/Objects.py
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from typing import Dict, List, Optional, Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.tools.object_factory.ConfigRule import json_config_rule_set
 from common.tools.object_factory.Connection import json_connection, json_connection_id
@@ -22,18 +23,21 @@ from common.tools.object_factory.Device import json_device_id, json_device_packe
 from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
 from common.tools.object_factory.Link import json_link, json_link_id
 from common.tools.object_factory.Service import json_service_id, json_service_l3nm_planned
+from common.tools.object_factory.Slice import json_slice_id, json_slice
 from common.tools.object_factory.Topology import json_topology, json_topology_id
-from common.tools.object_factory.PolicyRule import json_policy_rule, json_policy_rule_id
+from common.tools.object_factory.PolicyRule import json_policyrule, json_policyrule_id
 
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_NAME = DEFAULT_CONTEXT_NAME
+CONTEXT_ID   = json_context_id(CONTEXT_NAME)
+CONTEXT      = json_context(CONTEXT_NAME, name=CONTEXT_NAME)
 
 
 # ----- Topology -------------------------------------------------------------------------------------------------------
-TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+TOPOLOGY_NAME = DEFAULT_TOPOLOGY_NAME
+TOPOLOGY_ID   = json_topology_id(TOPOLOGY_NAME, context_id=CONTEXT_ID)
+TOPOLOGY      = json_topology(TOPOLOGY_NAME, context_id=CONTEXT_ID, name=TOPOLOGY_NAME)
 
 
 # ----- KPI Sample Types -----------------------------------------------------------------------------------------------
@@ -46,161 +50,126 @@ PACKET_PORT_SAMPLE_TYPES = [
 
 
 # ----- Device ---------------------------------------------------------------------------------------------------------
-DEVICE_R1_UUID  = 'R1'
-DEVICE_R1_ID    = json_device_id(DEVICE_R1_UUID)
-DEVICE_R1_EPS   = [
-    json_endpoint(DEVICE_R1_ID, 'EP2',   '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES),
-    json_endpoint(DEVICE_R1_ID, 'EP3',   '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES),
-    json_endpoint(DEVICE_R1_ID, 'EP100', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES),
-]
-DEVICE_R1_RULES = [
-    json_config_rule_set('dev/rsrc1/value', 'value1'),
-    json_config_rule_set('dev/rsrc2/value', 'value2'),
-    json_config_rule_set('dev/rsrc3/value', 'value3'),
-]
-DEVICE_R1       = json_device_packetrouter_disabled(
-    DEVICE_R1_UUID, endpoints=DEVICE_R1_EPS, config_rules=DEVICE_R1_RULES)
-
-
-DEVICE_R2_UUID  = 'R2'
-DEVICE_R2_ID    = json_device_id(DEVICE_R2_UUID)
-DEVICE_R2_EPS   = [
-    json_endpoint(DEVICE_R2_ID, 'EP1',   '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES),
-    json_endpoint(DEVICE_R2_ID, 'EP3',   '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES),
-    json_endpoint(DEVICE_R2_ID, 'EP100', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES),
-]
-DEVICE_R2_RULES = [
-    json_config_rule_set('dev/rsrc1/value', 'value4'),
-    json_config_rule_set('dev/rsrc2/value', 'value5'),
-    json_config_rule_set('dev/rsrc3/value', 'value6'),
-]
-DEVICE_R2       = json_device_packetrouter_disabled(
-    DEVICE_R2_UUID, endpoints=DEVICE_R2_EPS, config_rules=DEVICE_R2_RULES)
-
-
-DEVICE_R3_UUID  = 'R3'
-DEVICE_R3_ID    = json_device_id(DEVICE_R3_UUID)
-DEVICE_R3_EPS   = [
-    json_endpoint(DEVICE_R3_ID, 'EP1',   '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES),
-    json_endpoint(DEVICE_R3_ID, 'EP2',   '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES),
-    json_endpoint(DEVICE_R3_ID, 'EP100', '10G', topology_id=TOPOLOGY_ID, kpi_sample_types=PACKET_PORT_SAMPLE_TYPES),
-]
-DEVICE_R3_RULES = [
-    json_config_rule_set('dev/rsrc1/value', 'value4'),
-    json_config_rule_set('dev/rsrc2/value', 'value5'),
-    json_config_rule_set('dev/rsrc3/value', 'value6'),
-]
-DEVICE_R3       = json_device_packetrouter_disabled(
-    DEVICE_R3_UUID, endpoints=DEVICE_R3_EPS, config_rules=DEVICE_R3_RULES)
+def compose_device(name : str, endpoint_names : List[str]) -> Tuple[str, Dict, Dict]:
+    device_id = json_device_id(name)
+    endpoints = [
+        json_endpoint(device_id, endpoint_name, 'copper', topology_id=TOPOLOGY_ID,
+            kpi_sample_types=PACKET_PORT_SAMPLE_TYPES)
+        for endpoint_name in endpoint_names
+    ]
+    config_rules = [
+        json_config_rule_set('dev/rsrc1/value', 'value1'),
+        json_config_rule_set('dev/rsrc2/value', 'value2'),
+        json_config_rule_set('dev/rsrc3/value', 'value3'),
+    ]
+    device = json_device_packetrouter_disabled(name, endpoints=endpoints, config_rules=config_rules)
+    return name, device_id, device
+
+DEVICE_R1_NAME, DEVICE_R1_ID, DEVICE_R1 = compose_device('R1', ['1.2', '1.3', '2.2', '2.3'])
+DEVICE_R2_NAME, DEVICE_R2_ID, DEVICE_R2 = compose_device('R2', ['1.1', '1.3', '2.1', '2.3'])
+DEVICE_R3_NAME, DEVICE_R3_ID, DEVICE_R3 = compose_device('R3', ['1.1', '1.2', '2.1', '2.2'])
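+
+# Naming convention used by the endpoint names above (and by the links, services and connections
+# below): endpoint 'x.y' on router Rn faces router Ry; '1.*' endpoints carry the inter-router
+# links, while '2.*' endpoints terminate services and connections.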
 
 
 # ----- Link -----------------------------------------------------------------------------------------------------------
-LINK_R1_R2_UUID  = 'R1/EP2-R2/EP1'
-LINK_R1_R2_ID    = json_link_id(LINK_R1_R2_UUID)
-LINK_R1_R2_EPIDS = [
-    json_endpoint_id(DEVICE_R1_ID, 'EP2', topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R2_ID, 'EP1', topology_id=TOPOLOGY_ID),
-]
-LINK_R1_R2       = json_link(LINK_R1_R2_UUID, LINK_R1_R2_EPIDS)
-
-
-LINK_R2_R3_UUID  = 'R2/EP3-R3/EP2'
-LINK_R2_R3_ID    = json_link_id(LINK_R2_R3_UUID)
-LINK_R2_R3_EPIDS = [
-    json_endpoint_id(DEVICE_R2_ID, 'EP3', topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R3_ID, 'EP2', topology_id=TOPOLOGY_ID),
-]
-LINK_R2_R3       = json_link(LINK_R2_R3_UUID, LINK_R2_R3_EPIDS)
-
+def compose_link(name : str, endpoint_ids : List[Tuple[Dict, str]]) -> Tuple[str, Dict, Dict]:
+    link_id = json_link_id(name)
+    endpoint_ids = [
+        json_endpoint_id(device_id, endpoint_name, topology_id=TOPOLOGY_ID)
+        for device_id, endpoint_name in endpoint_ids
+    ]
+    link = json_link(name, endpoint_ids)
+    return name, link_id, link
 
-LINK_R1_R3_UUID  = 'R1/EP3-R3/EP1'
-LINK_R1_R3_ID    = json_link_id(LINK_R1_R3_UUID)
-LINK_R1_R3_EPIDS = [
-    json_endpoint_id(DEVICE_R1_ID, 'EP3', topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R3_ID, 'EP1', topology_id=TOPOLOGY_ID),
-]
-LINK_R1_R3       = json_link(LINK_R1_R3_UUID, LINK_R1_R3_EPIDS)
+LINK_R1_R2_NAME, LINK_R1_R2_ID, LINK_R1_R2 = compose_link('R1==R2', [(DEVICE_R1_ID, '1.2'), (DEVICE_R2_ID, '1.1')])
+LINK_R2_R3_NAME, LINK_R2_R3_ID, LINK_R2_R3 = compose_link('R2==R3', [(DEVICE_R2_ID, '1.3'), (DEVICE_R3_ID, '1.2')])
+LINK_R1_R3_NAME, LINK_R1_R3_ID, LINK_R1_R3 = compose_link('R1==R3', [(DEVICE_R1_ID, '1.3'), (DEVICE_R3_ID, '1.1')])
 
 
 # ----- Service --------------------------------------------------------------------------------------------------------
-SERVICE_R1_R2_UUID  = 'SVC:R1/EP100-R2/EP100'
-SERVICE_R1_R2_ID    = json_service_id(SERVICE_R1_R2_UUID, context_id=CONTEXT_ID)
-SERVICE_R1_R2_EPIDS = [
-    json_endpoint_id(DEVICE_R1_ID, 'EP100', topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R2_ID, 'EP100', topology_id=TOPOLOGY_ID),
-]
-SERVICE_R1_R2_CONST = [
-    json_constraint_custom('latency[ms]', '15.2'),
-    json_constraint_custom('jitter[us]',  '1.2'),
-]
-SERVICE_R1_R2_RULES = [
-    json_config_rule_set('svc/rsrc1/value', 'value7'),
-    json_config_rule_set('svc/rsrc2/value', 'value8'),
-    json_config_rule_set('svc/rsrc3/value', 'value9'),
-]
-SERVICE_R1_R2       = json_service_l3nm_planned(
-    SERVICE_R1_R2_UUID, endpoint_ids=SERVICE_R1_R2_EPIDS, constraints=SERVICE_R1_R2_CONST,
-    config_rules=SERVICE_R1_R2_RULES)
-
-
-SERVICE_R1_R3_UUID  = 'SVC:R1/EP100-R3/EP100'
-SERVICE_R1_R3_ID    = json_service_id(SERVICE_R1_R3_UUID, context_id=CONTEXT_ID)
-SERVICE_R1_R3_EPIDS = [
-    json_endpoint_id(DEVICE_R1_ID, 'EP100', topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R3_ID, 'EP100', topology_id=TOPOLOGY_ID),
-]
-SERVICE_R1_R3_CONST = [
-    json_constraint_custom('latency[ms]', '5.8'),
-    json_constraint_custom('jitter[us]',  '0.1'),
-]
-SERVICE_R1_R3_RULES = [
-    json_config_rule_set('svc/rsrc1/value', 'value7'),
-    json_config_rule_set('svc/rsrc2/value', 'value8'),
-    json_config_rule_set('svc/rsrc3/value', 'value9'),
-]
-SERVICE_R1_R3       = json_service_l3nm_planned(
-    SERVICE_R1_R3_UUID, endpoint_ids=SERVICE_R1_R3_EPIDS, constraints=SERVICE_R1_R3_CONST,
-    config_rules=SERVICE_R1_R3_RULES)
-
-
-SERVICE_R2_R3_UUID  = 'SVC:R2/EP100-R3/EP100'
-SERVICE_R2_R3_ID    = json_service_id(SERVICE_R2_R3_UUID, context_id=CONTEXT_ID)
-SERVICE_R2_R3_EPIDS = [
-    json_endpoint_id(DEVICE_R2_ID, 'EP100', topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R3_ID, 'EP100', topology_id=TOPOLOGY_ID),
-]
-SERVICE_R2_R3_CONST = [
-    json_constraint_custom('latency[ms]', '23.1'),
-    json_constraint_custom('jitter[us]',  '3.4'),
-]
-SERVICE_R2_R3_RULES = [
-    json_config_rule_set('svc/rsrc1/value', 'value7'),
-    json_config_rule_set('svc/rsrc2/value', 'value8'),
-    json_config_rule_set('svc/rsrc3/value', 'value9'),
-]
-SERVICE_R2_R3       = json_service_l3nm_planned(
-    SERVICE_R2_R3_UUID, endpoint_ids=SERVICE_R2_R3_EPIDS, constraints=SERVICE_R2_R3_CONST,
-    config_rules=SERVICE_R2_R3_RULES)
+def compose_service(
+    name : str, endpoint_ids : List[Tuple[Dict, str]], latency_ms : float, jitter_us : float
+) -> Tuple[str, Dict, Dict]:
+    service_id = json_service_id(name, context_id=CONTEXT_ID)
+    endpoint_ids = [
+        json_endpoint_id(device_id, endpoint_name, topology_id=TOPOLOGY_ID)
+        for device_id, endpoint_name in endpoint_ids
+    ]
+    constraints = [
+        json_constraint_custom('latency[ms]', str(latency_ms)),
+        json_constraint_custom('jitter[us]',  str(jitter_us)),
+    ]
+    config_rules = [
+        json_config_rule_set('svc/rsrc1/value', 'value7'),
+        json_config_rule_set('svc/rsrc2/value', 'value8'),
+        json_config_rule_set('svc/rsrc3/value', 'value9'),
+    ]
+    service = json_service_l3nm_planned(
+        name, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
+    return name, service_id, service
+
+SERVICE_R1_R2_NAME, SERVICE_R1_R2_ID, SERVICE_R1_R2 = compose_service(
+    'R1-R2', [(DEVICE_R1_ID, '2.2'), (DEVICE_R2_ID, '2.1')], 15.2, 1.2)
+
+SERVICE_R1_R3_NAME, SERVICE_R1_R3_ID, SERVICE_R1_R3 = compose_service(
+    'R1-R3', [(DEVICE_R1_ID, '2.3'), (DEVICE_R3_ID, '2.1')], 5.8, 0.1)
+
+SERVICE_R2_R3_NAME, SERVICE_R2_R3_ID, SERVICE_R2_R3 = compose_service(
+    'R2-R3', [(DEVICE_R2_ID, '2.3'), (DEVICE_R3_ID, '2.2')], 23.1, 3.4)
+
+
+# ----- Slice ----------------------------------------------------------------------------------------------------------
+def compose_slice(
+    name : str, endpoint_ids : List[Tuple[Dict, str]], latency_ms : float, jitter_us : float,
+    service_ids : Optional[List[Dict]] = None, subslice_ids : Optional[List[Dict]] = None,
+    owner : Optional[Dict] = None
+) -> Tuple[str, Dict, Dict]:
+    # Avoid mutable default arguments: a default list is shared across calls in Python.
+    if service_ids is None: service_ids = []
+    if subslice_ids is None: subslice_ids = []
+    slice_id = json_slice_id(name, context_id=CONTEXT_ID)
+    endpoint_ids = [
+        json_endpoint_id(device_id, endpoint_name, topology_id=TOPOLOGY_ID)
+        for device_id, endpoint_name in endpoint_ids
+    ]
+    constraints = [
+        json_constraint_custom('latency[ms]', str(latency_ms)),
+        json_constraint_custom('jitter[us]',  str(jitter_us)),
+    ]
+    config_rules = [
+        json_config_rule_set('svc/rsrc1/value', 'value7'),
+        json_config_rule_set('svc/rsrc2/value', 'value8'),
+        json_config_rule_set('svc/rsrc3/value', 'value9'),
+    ]
+    slice_ = json_slice(
+        name, context_id=CONTEXT_ID, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules,
+        service_ids=service_ids, subslice_ids=subslice_ids, owner=owner)
+    return name, slice_id, slice_
+
+SLICE_R1_R3_NAME, SLICE_R1_R3_ID, SLICE_R1_R3 = compose_slice(
+    'R1-R3', [(DEVICE_R1_ID, '2.3'), (DEVICE_R3_ID, '2.1')], 15.2, 1.2,
+    service_ids=[SERVICE_R1_R2_ID, SERVICE_R2_R3_ID],
+    subslice_ids=[], owner=None)
 
 
 # ----- Connection -----------------------------------------------------------------------------------------------------
-CONNECTION_R1_R3_UUID   = 'CON:R1/EP100-R3/EP100'
-CONNECTION_R1_R3_ID     = json_connection_id(CONNECTION_R1_R3_UUID)
-CONNECTION_R1_R3_EPIDS  = [
-    json_endpoint_id(DEVICE_R1_ID, 'EP100', topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R1_ID, 'EP2',   topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R2_ID, 'EP1',   topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R2_ID, 'EP3',   topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R3_ID, 'EP2',   topology_id=TOPOLOGY_ID),
-    json_endpoint_id(DEVICE_R3_ID, 'EP100', topology_id=TOPOLOGY_ID),
-]
-CONNECTION_R1_R3_SVCIDS = [SERVICE_R1_R2_ID, SERVICE_R2_R3_ID]
-CONNECTION_R1_R3        = json_connection(
-    CONNECTION_R1_R3_UUID, service_id=SERVICE_R1_R3_ID, path_hops_endpoint_ids=CONNECTION_R1_R3_EPIDS,
-    sub_service_ids=CONNECTION_R1_R3_SVCIDS)
+def compose_connection(
+    name : str, service_id : Dict, endpoint_ids : List[Tuple[Dict, str]],
+    sub_service_ids : Optional[List[Dict]] = None
+) -> Tuple[str, Dict, Dict]:
+    if sub_service_ids is None: sub_service_ids = []    # avoid a mutable default argument
+    connection_id = json_connection_id(name)
+    endpoint_ids = [
+        json_endpoint_id(device_id, endpoint_name, topology_id=TOPOLOGY_ID)
+        for device_id, endpoint_name in endpoint_ids
+    ]
+    connection = json_connection(
+        name, service_id=service_id, path_hops_endpoint_ids=endpoint_ids, sub_service_ids=sub_service_ids)
+    return name, connection_id, connection
+
+CONNECTION_R1_R3_NAME, CONNECTION_R1_R3_ID, CONNECTION_R1_R3 = compose_connection(
+    'CON:R1/2.3-R3/2.1', SERVICE_R1_R3_ID, [
+        (DEVICE_R1_ID, '2.3'),
+        (DEVICE_R1_ID, '1.2'), (DEVICE_R2_ID, '1.1'),
+        (DEVICE_R2_ID, '1.3'), (DEVICE_R3_ID, '1.2'),
+        (DEVICE_R3_ID, '2.1')
+    ], sub_service_ids=[SERVICE_R1_R2_ID, SERVICE_R2_R3_ID])
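+
+# The path hops above traverse R1 -> R2 -> R3: one service endpoint ('2.x') at each edge and an
+# (egress, ingress) pair of link endpoints ('1.x') per traversed link.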
 
 
 # ----- PolicyRule -------------------------------------------------------------------------------------------------------
-POLICY_RULE_UUID = '56380225-3e40-4f74-9162-529f8dcb96a1'
-POLICY_RULE_ID   = json_policy_rule_id(POLICY_RULE_UUID)
-POLICY_RULE      = json_policy_rule(POLICY_RULE_UUID)
+POLICYRULE_NAME = 'my-device-policy'
+POLICYRULE_ID   = json_policyrule_id(POLICYRULE_NAME)
+POLICYRULE      = json_policyrule(POLICYRULE_NAME, policy_priority=1)
diff --git a/src/context/tests/conftest.py b/src/context/tests/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..93b8c66beff65cdbbfd97ca4483f55333fa36c71
--- /dev/null
+++ b/src/context/tests/conftest.py
@@ -0,0 +1,78 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, pytest, sqlalchemy
+from _pytest.config import Config
+from _pytest.terminal import TerminalReporter
+from typing import Optional, Tuple
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name,
+    get_service_port_grpc, get_service_port_http)
+from common.message_broker.Factory import get_messagebroker_backend
+from common.message_broker.MessageBroker import MessageBroker
+from common.method_wrappers.Decorator import MetricsPool
+from context.client.ContextClient import ContextClient
+from context.service.ContextService import ContextService
+from context.service.database.Engine import Engine
+from context.service.database.models._Base import rebuild_database
+
+LOCAL_HOST = '127.0.0.1'
+GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT))   # avoid privileged ports
+HTTP_PORT = 10000 + int(get_service_port_http(ServiceNameEnum.CONTEXT))   # avoid privileged ports
+
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT)
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT)
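+
+# Setting these variables before the fixtures below run makes ContextService bind, and
+# ContextClient connect to, the local test instance instead of the in-cluster address.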
+
+@pytest.fixture(scope='session')
+def context_db_mb(request) -> Tuple[sqlalchemy.engine.Engine, MessageBroker]:   # pylint: disable=unused-argument
+    _db_engine = Engine.get_engine()
+    Engine.drop_database(_db_engine)
+    Engine.create_database(_db_engine)
+    rebuild_database(_db_engine)
+
+    _msg_broker = MessageBroker(get_messagebroker_backend())
+    yield _db_engine, _msg_broker
+    _msg_broker.terminate()
+
+RAW_METRICS : Optional[MetricsPool] = None
+
+@pytest.fixture(scope='session')
+def context_service(
+    context_db_mb : Tuple[sqlalchemy.engine.Engine, MessageBroker]  # pylint: disable=redefined-outer-name
+):
+    global RAW_METRICS # pylint: disable=global-statement
+    _service = ContextService(context_db_mb[0], context_db_mb[1])
+    RAW_METRICS = _service.context_servicer._get_metrics()
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def context_client(context_service : ContextService): # pylint: disable=redefined-outer-name,unused-argument
+    _client = ContextClient()
+    yield _client
+    _client.close()
+
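+# Usage sketch (hypothetical test): requesting 'context_client' transitively boots the database,
+# message broker and gRPC service once per session:
+#   def test_something(context_client : ContextClient) -> None:
+#       response = context_client.ListContextIds(Empty())
+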
+@pytest.hookimpl(hookwrapper=True)
+def pytest_terminal_summary(
+    terminalreporter : TerminalReporter, exitstatus : int, config : Config  # pylint: disable=unused-argument
+):
+    yield
+
+    if RAW_METRICS is not None:
+        print('')
+        print('Performance Results:')
+        print(RAW_METRICS.get_pretty_table().get_string())
diff --git a/src/context/tests/test_connection.py b/src/context/tests/test_connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4b9e4824ae722ae62c95bcba04fedad33972b89
--- /dev/null
+++ b/src/context/tests/test_connection.py
@@ -0,0 +1,247 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, pytest #, time
+from common.proto.context_pb2 import (
+    Connection, ConnectionId, Context, ContextId, Device, DeviceId, EndPointId, Service, ServiceId, Topology,
+    TopologyId)
+#from common.proto.context_pb2 import (
+#    ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, ServiceEvent, TopologyEvent)
+from context.client.ContextClient import ContextClient
+#from context.client.EventsCollector import EventsCollector
+from context.service.database.uuids.Connection import connection_get_uuid
+from context.service.database.uuids.EndPoint import endpoint_get_uuid
+#from .Constants import GET_EVENTS_TIMEOUT
+from .Objects import (
+    CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_NAME, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID,
+    DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R3, SERVICE_R1_R3_ID,
+    SERVICE_R2_R3, SERVICE_R2_R3_ID, TOPOLOGY, TOPOLOGY_ID)
+
+@pytest.mark.depends(on=['context/tests/test_service.py::test_service', 'context/tests/test_slice.py::test_slice'])
+def test_connection(context_client : ContextClient) -> None:
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    #events_collector = EventsCollector(
+    #    context_client, log_events_received=True,
+    #    activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True,
+    #    activate_link_collector = True, activate_service_collector = True, activate_slice_collector = True,
+    #    activate_connection_collector = True)
+    #events_collector.start()
+    #time.sleep(3)
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client.SetContext(Context(**CONTEXT))
+    context_uuid = response.context_uuid.uuid
+
+    response = context_client.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    #topology_uuid = response.topology_uuid.uuid
+
+    response = context_client.SetDevice(Device(**DEVICE_R1))
+    #device_r1_uuid = response.device_uuid.uuid
+
+    response = context_client.SetDevice(Device(**DEVICE_R2))
+    #device_r2_uuid = response.device_uuid.uuid
+
+    response = context_client.SetDevice(Device(**DEVICE_R3))
+    #device_r3_uuid = response.device_uuid.uuid
+
+    response = context_client.SetService(Service(**SERVICE_R1_R2))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    #service_r1_r2_uuid = response.service_uuid.uuid
+
+    response = context_client.SetService(Service(**SERVICE_R2_R3))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    #service_r2_r3_uuid = response.service_uuid.uuid
+
+    response = context_client.SetService(Service(**SERVICE_R1_R3))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    service_r1_r3_uuid = response.service_uuid.uuid
+
+    #events = events_collector.get_events(block=True, count=8, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(events[0], ContextEvent)
+    #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[0].context_id.context_uuid.uuid == context_uuid
+    #assert isinstance(events[1], TopologyEvent)
+    #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[1].topology_id.topology_uuid.uuid == topology_uuid
+    #assert isinstance(events[2], DeviceEvent)
+    #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[2].device_id.device_uuid.uuid == device_r1_uuid
+    #assert isinstance(events[3], DeviceEvent)
+    #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[3].device_id.device_uuid.uuid == device_r2_uuid
+    #assert isinstance(events[4], DeviceEvent)
+    #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[4].device_id.device_uuid.uuid == device_r3_uuid
+    #assert isinstance(events[5], ServiceEvent)
+    #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[5].service_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[5].service_id.service_uuid.uuid == service_r1_r2_uuid
+    #assert isinstance(events[6], ServiceEvent)
+    #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[6].service_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[6].service_id.service_uuid.uuid == service_r2_r3_uuid
+    #assert isinstance(events[7], ServiceEvent)
+    #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[7].service_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[7].service_id.service_uuid.uuid == service_r1_r3_uuid
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    connection_id = ConnectionId(**CONNECTION_R1_R3_ID)
+    connection_uuid = connection_get_uuid(connection_id, allow_random=False)
+    with pytest.raises(grpc.RpcError) as e:
+        context_client.GetConnection(connection_id)
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    MSG = 'Connection({:s}) not found; connection_uuid generated was: {:s}'
+    assert e.value.details() == MSG.format(CONNECTION_R1_R3_NAME, connection_uuid)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connection_ids) == 0
+
+    response = context_client.ListConnections(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connections) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        WRONG_CONNECTION = copy.deepcopy(CONNECTION_R1_R3)
+        WRONG_CONNECTION['path_hops_endpoint_ids'][0]\
+            ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid'
+        context_client.SetConnection(Connection(**WRONG_CONNECTION))
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    wrong_endpoint_id = EndPointId(**WRONG_CONNECTION['path_hops_endpoint_ids'][0])
+    _,_,wrong_endpoint_uuid = endpoint_get_uuid(wrong_endpoint_id, allow_random=False)
+    msg = 'endpoint({:s}) not found; while inserting in table "connection_endpoint"'.format(wrong_endpoint_uuid)
+    assert e.value.details() == msg
+    # TODO: should we check that all endpoints belong to same topology?
+    # TODO: should we check that endpoints form links over the topology?
+
+    response = context_client.SetConnection(Connection(**CONNECTION_R1_R3))
+    connection_r1_r3_uuid = response.connection_uuid.uuid
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ConnectionEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID))
+    assert response.connection_id.connection_uuid.uuid == connection_r1_r3_uuid
+    assert response.service_id.context_id.context_uuid.uuid == context_uuid
+    assert response.service_id.service_uuid.uuid == service_r1_r3_uuid
+    assert len(response.path_hops_endpoint_ids) == 6
+    assert len(response.sub_service_ids) == 2
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connection_ids) == 1
+    assert response.connection_ids[0].connection_uuid.uuid == connection_r1_r3_uuid
+
+    response = context_client.ListConnections(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connections) == 1
+    assert response.connections[0].connection_id.connection_uuid.uuid == connection_r1_r3_uuid
+    assert len(response.connections[0].path_hops_endpoint_ids) == 6
+    assert len(response.connections[0].sub_service_ids) == 2
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    # TODO: change something... path? subservices?
+    response = context_client.SetConnection(Connection(**CONNECTION_R1_R3))
+    assert response.connection_uuid.uuid == connection_r1_r3_uuid
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ConnectionEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid
+
+    # ----- Get when the object is modified ----------------------------------------------------------------------------
+    response = context_client.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID))
+    assert response.connection_id.connection_uuid.uuid == connection_r1_r3_uuid
+    assert response.service_id.context_id.context_uuid.uuid == context_uuid
+    assert response.service_id.service_uuid.uuid == service_r1_r3_uuid
+    assert len(response.path_hops_endpoint_ids) == 6
+    assert len(response.sub_service_ids) == 2
+
+    # ----- List when the object is modified ---------------------------------------------------------------------------
+    response = context_client.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connection_ids) == 1
+    assert response.connection_ids[0].connection_uuid.uuid == connection_r1_r3_uuid
+
+    response = context_client.ListConnections(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connections) == 1
+    assert response.connections[0].connection_id.connection_uuid.uuid == connection_r1_r3_uuid
+    assert len(response.connections[0].path_hops_endpoint_ids) == 6
+    assert len(response.connections[0].sub_service_ids) == 2
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ConnectionEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert event.connection_id.connection_uuid.uuid == connection_r1_r3_uuid
+
+    # ----- List after deleting the object -----------------------------------------------------------------------------
+    response = context_client.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connection_ids) == 0
+
+    response = context_client.ListConnections(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connections) == 0
+
+    # ----- Clean dependencies used in the test and capture related events ---------------------------------------------
+    context_client.RemoveService(ServiceId(**SERVICE_R1_R3_ID))
+    context_client.RemoveService(ServiceId(**SERVICE_R2_R3_ID))
+    context_client.RemoveService(ServiceId(**SERVICE_R1_R2_ID))
+    context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID))
+    context_client.RemoveDevice(DeviceId(**DEVICE_R2_ID))
+    context_client.RemoveDevice(DeviceId(**DEVICE_R3_ID))
+    context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+    context_client.RemoveContext(ContextId(**CONTEXT_ID))
+
+    #events = events_collector.get_events(block=True, count=8, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(events[0], ServiceEvent)
+    #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[0].service_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[0].service_id.service_uuid.uuid == service_r1_r3_uuid
+    #assert isinstance(events[1], ServiceEvent)
+    #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[1].service_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid
+    #assert isinstance(events[2], ServiceEvent)
+    #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[2].service_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[2].service_id.service_uuid.uuid == service_r1_r2_uuid
+    #assert isinstance(events[3], DeviceEvent)
+    #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[3].device_id.device_uuid.uuid == device_r1_uuid
+    #assert isinstance(events[4], DeviceEvent)
+    #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[4].device_id.device_uuid.uuid == device_r2_uuid
+    #assert isinstance(events[5], DeviceEvent)
+    #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[5].device_id.device_uuid.uuid == device_r3_uuid
+    #assert isinstance(events[6], TopologyEvent)
+    #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[6].topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[6].topology_id.topology_uuid.uuid == topology_uuid
+    #assert isinstance(events[7], ContextEvent)
+    #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[7].context_id.context_uuid.uuid == context_uuid
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    #events_collector.stop()
diff --git a/src/context/tests/test_context.py b/src/context/tests/test_context.py
new file mode 100644
index 0000000000000000000000000000000000000000..29d4442f95f8bf573770dd544c627b609c0f1375
--- /dev/null
+++ b/src/context/tests/test_context.py
@@ -0,0 +1,133 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, pytest #, time
+from common.proto.context_pb2 import Context, ContextId, Empty
+#from common.proto.context_pb2 import ContextEvent, EventTypeEnum
+from context.client.ContextClient import ContextClient
+#from context.client.EventsCollector import EventsCollector
+from context.service.database.uuids.Context import context_get_uuid
+#from .Constants import GET_EVENTS_TIMEOUT
+from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME
+
+def test_context(context_client : ContextClient) -> None:
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    #events_collector = EventsCollector(
+    #    context_client, log_events_received=True,
+    #    activate_context_collector = True, activate_topology_collector = False, activate_device_collector = False,
+    #    activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False,
+    #    activate_connection_collector = False)
+    #events_collector.start()
+    #time.sleep(3) # wait for the events collector to start
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
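+    # context_get_uuid() computes the UUID the service would derive from this ContextId;
+    # allow_random=False forbids falling back to a random UUID, so the value can be
+    # checked against the NOT_FOUND details returned below.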
+    context_id = ContextId(**CONTEXT_ID)
+    context_uuid = context_get_uuid(context_id, allow_random=False)
+    with pytest.raises(grpc.RpcError) as e:
+        context_client.GetContext(context_id)
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    MSG = 'Context({:s}) not found; context_uuid generated was: {:s}'
+    assert e.value.details() == MSG.format(CONTEXT_NAME, context_uuid)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client.ListContextIds(Empty())
+    assert len(response.context_ids) == 0
+
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    response = context_client.SetContext(Context(**CONTEXT))
+    assert response.context_uuid.uuid == context_uuid
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ContextEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert event.context_id.context_uuid.uuid == context_uuid
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.name == CONTEXT_NAME
+    assert len(response.topology_ids) == 0
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client.ListContextIds(Empty())
+    assert len(response.context_ids) == 1
+    assert response.context_ids[0].context_uuid.uuid == context_uuid
+
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == 1
+    assert response.contexts[0].context_id.context_uuid.uuid == context_uuid
+    assert response.contexts[0].name == CONTEXT_NAME
+    assert len(response.contexts[0].topology_ids) == 0
+    assert len(response.contexts[0].service_ids) == 0
+    assert len(response.contexts[0].slice_ids) == 0
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
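+    # SetContext on an existing ContextId acts as an update: only the name changes
+    # here, and the returned UUID must stay the same.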
+    new_context_name = 'new'
+    CONTEXT_WITH_NAME = copy.deepcopy(CONTEXT)
+    CONTEXT_WITH_NAME['name'] = new_context_name
+    response = context_client.SetContext(Context(**CONTEXT_WITH_NAME))
+    assert response.context_uuid.uuid == context_uuid
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ContextEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    #assert event.context_id.context_uuid.uuid == context_uuid
+
+    # ----- Get when the object is modified ----------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.name == new_context_name
+    assert len(response.topology_ids) == 0
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    # ----- List when the object is modified ---------------------------------------------------------------------------
+    response = context_client.ListContextIds(Empty())
+    assert len(response.context_ids) == 1
+    assert response.context_ids[0].context_uuid.uuid == context_uuid
+
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == 1
+    assert response.contexts[0].context_id.context_uuid.uuid == context_uuid
+    assert response.contexts[0].name == new_context_name
+    assert len(response.contexts[0].topology_ids) == 0
+    assert len(response.contexts[0].service_ids) == 0
+    assert len(response.contexts[0].slice_ids) == 0
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client.RemoveContext(ContextId(**CONTEXT_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ContextEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert event.context_id.context_uuid.uuid == context_uuid
+
+    # ----- List after deleting the object -----------------------------------------------------------------------------
+    response = context_client.ListContextIds(Empty())
+    assert len(response.context_ids) == 0
+
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == 0
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    #events_collector.stop()
diff --git a/src/context/tests/test_device.py b/src/context/tests/test_device.py
new file mode 100644
index 0000000000000000000000000000000000000000..9afe64f570ed26fdc4c0d8bf67e89079930ab938
--- /dev/null
+++ b/src/context/tests/test_device.py
@@ -0,0 +1,206 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, pytest #, time
+from common.proto.context_pb2 import (
+    Context, ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, Empty, Topology, TopologyId)
+#from common.proto.context_pb2 import ContextEvent, DeviceEvent, EventTypeEnum, TopologyEvent
+from context.client.ContextClient import ContextClient
+#from context.client.EventsCollector import EventsCollector
+from context.service.database.uuids.Device import device_get_uuid
+#from .Constants import GET_EVENTS_TIMEOUT
+from .Objects import CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_NAME, TOPOLOGY, TOPOLOGY_ID
+
+@pytest.mark.depends(on=['context/tests/test_topology.py::test_topology'])
+def test_device(context_client : ContextClient) -> None:
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    #events_collector = EventsCollector(
+    #    context_client, log_events_received=True,
+    #    activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True,
+    #    activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False,
+    #    activate_connection_collector = False)
+    #events_collector.start()
+    #time.sleep(3)
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client.SetContext(Context(**CONTEXT))
+    context_uuid = response.context_uuid.uuid
+
+    response = context_client.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    topology_uuid = response.topology_uuid.uuid
+
+    #events = events_collector.get_events(block=True, count=2, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(events[0], ContextEvent)
+    #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[0].context_id.context_uuid.uuid == context_uuid
+    #assert isinstance(events[1], TopologyEvent)
+    #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[1].topology_id.topology_uuid.uuid == topology_uuid
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    device_id = DeviceId(**DEVICE_R1_ID)
+    device_uuid = device_get_uuid(device_id, allow_random=False)
+    with pytest.raises(grpc.RpcError) as e:
+        context_client.GetDevice(device_id)
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    MSG = 'Device({:s}) not found; device_uuid generated was: {:s}'
+    assert e.value.details() == MSG.format(DEVICE_R1_NAME, device_uuid)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client.ListDeviceIds(Empty())
+    assert len(response.device_ids) == 0
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
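+    # Negative case first: an endpoint that references a different device UUID must be
+    # rejected with INVALID_ARGUMENT before the valid SetDevice call succeeds.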
+    with pytest.raises(grpc.RpcError) as e:
+        WRONG_DEVICE = copy.deepcopy(DEVICE_R1)
+        WRONG_DEVICE_UUID = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
+        WRONG_DEVICE['device_endpoints'][0]['endpoint_id']['device_id']['device_uuid']['uuid'] = WRONG_DEVICE_UUID
+        context_client.SetDevice(Device(**WRONG_DEVICE))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    MSG = 'request.device_endpoints[0].device_id.device_uuid.uuid({}) is invalid; '\
+          'should be == request.device_id.device_uuid.uuid({})'
+    assert e.value.details() == MSG.format(WRONG_DEVICE_UUID, device_id.device_uuid.uuid) # pylint: disable=no-member
+
+    response = context_client.SetDevice(Device(**DEVICE_R1))
+    assert response.device_uuid.uuid == device_uuid
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, DeviceEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert event.device_id.device_uuid.uuid == device_uuid
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client.GetDevice(DeviceId(**DEVICE_R1_ID))
+    assert response.device_id.device_uuid.uuid == device_uuid
+    assert response.name == DEVICE_R1_NAME
+    assert response.device_type == 'packet-router'
+    assert len(response.device_config.config_rules) == 3
+    assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
+    assert len(response.device_drivers) == 1
+    assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers
+    assert len(response.device_endpoints) == 4
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client.ListDeviceIds(Empty())
+    assert len(response.device_ids) == 1
+    assert response.device_ids[0].device_uuid.uuid == device_uuid
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 1
+    assert response.devices[0].device_id.device_uuid.uuid == device_uuid
+    assert response.devices[0].name == DEVICE_R1_NAME
+    assert response.devices[0].device_type == 'packet-router'
+    assert len(response.devices[0].device_config.config_rules) == 3
+    assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
+    assert len(response.devices[0].device_drivers) == 1
+    assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers
+    assert len(response.devices[0].device_endpoints) == 4
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
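+    # SetDevice on an existing UUID acts as an update: rename the device, enable it,
+    # and append a second driver on top of the original OPENCONFIG one.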
+    new_device_name = 'new'
+    new_device_driver = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED
+    DEVICE_UPDATED = copy.deepcopy(DEVICE_R1)
+    DEVICE_UPDATED['name'] = new_device_name
+    DEVICE_UPDATED['device_operational_status'] = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+    DEVICE_UPDATED['device_drivers'].append(new_device_driver)
+    response = context_client.SetDevice(Device(**DEVICE_UPDATED))
+    assert response.device_uuid.uuid == device_uuid
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, DeviceEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    #assert event.device_id.device_uuid.uuid == device_uuid
+
+    # ----- Get when the object is modified ----------------------------------------------------------------------------
+    response = context_client.GetDevice(DeviceId(**DEVICE_R1_ID))
+    assert response.device_id.device_uuid.uuid == device_uuid
+    assert response.name == new_device_name
+    assert response.device_type == 'packet-router'
+    assert len(response.device_config.config_rules) == 3
+    assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+    assert len(response.device_drivers) == 2
+    assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.device_drivers
+    assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.device_drivers
+    assert len(response.device_endpoints) == 4
+
+    # ----- List when the object is modified ---------------------------------------------------------------------------
+    response = context_client.ListDeviceIds(Empty())
+    assert len(response.device_ids) == 1
+    assert response.device_ids[0].device_uuid.uuid == device_uuid
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 1
+    assert response.devices[0].device_id.device_uuid.uuid == device_uuid
+    assert response.devices[0].name == new_device_name
+    assert response.devices[0].device_type == 'packet-router'
+    assert len(response.devices[0].device_config.config_rules) == 3
+    assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+    assert len(response.devices[0].device_drivers) == 2
+    assert DeviceDriverEnum.DEVICEDRIVER_UNDEFINED in response.devices[0].device_drivers
+    assert DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG in response.devices[0].device_drivers
+    assert len(response.devices[0].device_endpoints) == 4
+
+    # ----- Check relation was created ---------------------------------------------------------------------------------
+    response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID))
+    assert response.topology_id.context_id.context_uuid.uuid == context_uuid
+    assert response.topology_id.topology_uuid.uuid == topology_uuid
+    assert len(response.device_ids) == 1
+    assert response.device_ids[0].device_uuid.uuid == device_uuid
+    assert len(response.link_ids) == 0
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, DeviceEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert event.device_id.device_uuid.uuid == device_uuid
+
+    # ----- List after deleting the object -----------------------------------------------------------------------------
+    response = context_client.ListDeviceIds(Empty())
+    assert len(response.device_ids) == 0
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 0
+
+    response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID))
+    assert response.topology_id.context_id.context_uuid.uuid == context_uuid
+    assert response.topology_id.topology_uuid.uuid == topology_uuid
+    assert len(response.device_ids) == 0
+    assert len(response.link_ids) == 0
+
+    # ----- Clean dependencies used in the test and capture related events ---------------------------------------------
+    context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+    context_client.RemoveContext(ContextId(**CONTEXT_ID))
+
+    #events = events_collector.get_events(block=True, count=2, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(events[0], TopologyEvent)
+    #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[0].topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[0].topology_id.topology_uuid.uuid == topology_uuid
+    #assert isinstance(events[1], ContextEvent)
+    #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[1].context_id.context_uuid.uuid == context_uuid
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    #events_collector.stop()
diff --git a/src/context/tests/test_hasher.py b/src/context/tests/test_hasher.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9a52f5d0222160ea434bc326e49e8a0f80ea622
--- /dev/null
+++ b/src/context/tests/test_hasher.py
@@ -0,0 +1,47 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, pytest
+from context.service.database.tools.FastHasher import (
+    FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher)
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+# ----- Test misc. Context internal tools ------------------------------------------------------------------------------
+
+def test_tools_fast_string_hasher():
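+    # fast_hasher() accepts a string/bytes value, or a list/tuple of string/bytes items;
+    # anything else must raise a TypeError whose message pinpoints the offending
+    # element for sequences (e.g. data[0]).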
+    with pytest.raises(TypeError) as e:
+        fast_hasher(27)
+    assert str(e.value) == "data(27) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'int'>"
+
+    with pytest.raises(TypeError) as e:
+        fast_hasher({27})
+    assert str(e.value) == "data({27}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>"
+
+    with pytest.raises(TypeError) as e:
+        fast_hasher({'27'})
+    assert str(e.value) == "data({'27'}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>"
+
+    with pytest.raises(TypeError) as e:
+        fast_hasher([27])
+    assert str(e.value) == "data[0](27) must be " + FASTHASHER_ITEM_ACCEPTED_FORMAT + ", found <class 'int'>"
+
+    fast_hasher('hello-world')
+    fast_hasher('hello-world'.encode('UTF-8'))
+    fast_hasher(['hello', 'world'])
+    fast_hasher(('hello', 'world'))
+    fast_hasher(['hello'.encode('UTF-8'), 'world'.encode('UTF-8')])
+    fast_hasher(('hello'.encode('UTF-8'), 'world'.encode('UTF-8')))
diff --git a/src/context/tests/test_link.py b/src/context/tests/test_link.py
new file mode 100644
index 0000000000000000000000000000000000000000..96021a449d3169cd0b0b6e813ae9d7db7b3a7585
--- /dev/null
+++ b/src/context/tests/test_link.py
@@ -0,0 +1,196 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, pytest #, time
+from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Topology, TopologyId
+#from common.proto.context_pb2 import ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, TopologyEvent
+from context.client.ContextClient import ContextClient
+#from context.client.EventsCollector import EventsCollector
+from context.service.database.uuids.Link import link_get_uuid
+#from .Constants import GET_EVENTS_TIMEOUT
+from .Objects import (
+    CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R2_NAME,
+    TOPOLOGY, TOPOLOGY_ID)
+
+@pytest.mark.depends(on=['context/tests/test_device.py::test_device'])
+def test_link(context_client : ContextClient) -> None:
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    #events_collector = EventsCollector(
+    #    context_client, log_events_received=True,
+    #    activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True,
+    #    activate_link_collector = True, activate_service_collector = False, activate_slice_collector = False,
+    #    activate_connection_collector = False)
+    #events_collector.start()
+    #time.sleep(3)
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client.SetContext(Context(**CONTEXT))
+    context_uuid = response.context_uuid.uuid
+
+    response = context_client.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    topology_uuid = response.topology_uuid.uuid
+
+    response = context_client.SetDevice(Device(**DEVICE_R1))
+    device_r1_uuid = response.device_uuid.uuid
+
+    response = context_client.SetDevice(Device(**DEVICE_R2))
+    device_r2_uuid = response.device_uuid.uuid
+
+    #events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(events[0], ContextEvent)
+    #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[0].context_id.context_uuid.uuid == context_uuid
+    #assert isinstance(events[1], TopologyEvent)
+    #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[1].topology_id.topology_uuid.uuid == topology_uuid
+    #assert isinstance(events[2], DeviceEvent)
+    #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[2].device_id.device_uuid.uuid == device_r1_uuid
+    #assert isinstance(events[3], DeviceEvent)
+    #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[3].device_id.device_uuid.uuid == device_r2_uuid
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    link_id = LinkId(**LINK_R1_R2_ID)
+    link_uuid = link_get_uuid(link_id, allow_random=False)
+    with pytest.raises(grpc.RpcError) as e:
+        context_client.GetLink(link_id)
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    MSG = 'Link({:s}) not found; link_uuid generated was: {:s}'
+    assert e.value.details() == MSG.format(LINK_R1_R2_NAME, link_uuid)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client.ListLinkIds(Empty())
+    assert len(response.link_ids) == 0
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    response = context_client.SetLink(Link(**LINK_R1_R2))
+    assert response.link_uuid.uuid == link_uuid
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, LinkEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert event.link_id.link_uuid.uuid == link_uuid
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client.GetLink(LinkId(**LINK_R1_R2_ID))
+    assert response.link_id.link_uuid.uuid == link_uuid
+    assert response.name == LINK_R1_R2_NAME
+    assert len(response.link_endpoint_ids) == 2
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client.ListLinkIds(Empty())
+    assert len(response.link_ids) == 1
+    assert response.link_ids[0].link_uuid.uuid == link_uuid
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == 1
+    assert response.links[0].link_id.link_uuid.uuid == link_uuid
+    assert response.links[0].name == LINK_R1_R2_NAME
+    assert len(response.links[0].link_endpoint_ids) == 2
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    new_link_name = 'new'
+    LINK_UPDATED = copy.deepcopy(LINK_R1_R2)
+    LINK_UPDATED['name'] = new_link_name
+    response = context_client.SetLink(Link(**LINK_UPDATED))
+    assert response.link_uuid.uuid == link_uuid
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, LinkEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    #assert event.link_id.link_uuid.uuid == link_uuid
+
+    # ----- Get when the object is modified ----------------------------------------------------------------------------
+    response = context_client.GetLink(LinkId(**LINK_R1_R2_ID))
+    assert response.link_id.link_uuid.uuid == link_uuid
+    assert response.name == new_link_name
+    assert len(response.link_endpoint_ids) == 2
+
+    # ----- List when the object is modified ---------------------------------------------------------------------------
+    response = context_client.ListLinkIds(Empty())
+    assert len(response.link_ids) == 1
+    assert response.link_ids[0].link_uuid.uuid == link_uuid
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == 1
+    assert response.links[0].link_id.link_uuid.uuid == link_uuid
+    assert response.links[0].name == new_link_name
+    assert len(response.links[0].link_endpoint_ids) == 2
+
+    # ----- Check relation was created ---------------------------------------------------------------------------------
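+    # Creating the link must implicitly register it in the topology, next to the two
+    # devices added during setup.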
+    response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID))
+    assert response.topology_id.context_id.context_uuid.uuid == context_uuid
+    assert response.topology_id.topology_uuid.uuid == topology_uuid
+    assert len(response.device_ids) == 2
+    assert response.device_ids[0].device_uuid.uuid in {device_r1_uuid, device_r2_uuid}
+    assert response.device_ids[1].device_uuid.uuid in {device_r1_uuid, device_r2_uuid}
+    assert len(response.link_ids) == 1
+    assert response.link_ids[0].link_uuid.uuid == link_uuid
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client.RemoveLink(LinkId(**LINK_R1_R2_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, LinkEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert event.link_id.link_uuid.uuid == link_uuid
+
+    # ----- List after deleting the object -----------------------------------------------------------------------------
+    response = context_client.ListLinkIds(Empty())
+    assert len(response.link_ids) == 0
+
+    response = context_client.ListLinks(Empty())
+    assert len(response.links) == 0
+
+    response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID))
+    assert response.topology_id.context_id.context_uuid.uuid == context_uuid
+    assert response.topology_id.topology_uuid.uuid == topology_uuid
+    assert len(response.device_ids) == 2
+    assert response.device_ids[0].device_uuid.uuid in {device_r1_uuid, device_r2_uuid}
+    assert response.device_ids[1].device_uuid.uuid in {device_r1_uuid, device_r2_uuid}
+    assert len(response.link_ids) == 0
+
+    # ----- Clean dependencies used in the test and capture related events ---------------------------------------------
+    context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID))
+    context_client.RemoveDevice(DeviceId(**DEVICE_R2_ID))
+    context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+    context_client.RemoveContext(ContextId(**CONTEXT_ID))
+
+    #events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(events[0], DeviceEvent)
+    #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[0].device_id.device_uuid.uuid == device_r1_uuid
+    #assert isinstance(events[1], DeviceEvent)
+    #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[1].device_id.device_uuid.uuid == device_r2_uuid
+    #assert isinstance(events[2], TopologyEvent)
+    #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[2].topology_id.topology_uuid.uuid == topology_uuid
+    #assert isinstance(events[3], ContextEvent)
+    #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[3].context_id.context_uuid.uuid == context_uuid
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    #events_collector.stop()
diff --git a/src/context/tests/test_policy.py b/src/context/tests/test_policy.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cc0b955760329c08a50e25f3240b93be76a2fcc
--- /dev/null
+++ b/src/context/tests/test_policy.py
@@ -0,0 +1,90 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, pytest
+from common.proto.context_pb2 import Empty
+from common.proto.policy_pb2 import PolicyRule, PolicyRuleId
+from context.client.ContextClient import ContextClient
+from context.service.database.uuids.PolicuRule import policyrule_get_uuid
+from .Objects import POLICYRULE, POLICYRULE_ID, POLICYRULE_NAME
+
+@pytest.mark.depends(on=['context/tests/test_connection.py::test_connection'])
+def test_policy(context_client : ContextClient) -> None:
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
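+    # policyrule_get_uuid() derives the UUID the service would assign to this
+    # PolicyRuleId, so the NOT_FOUND details can be verified locally.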
+    policyrule_id = PolicyRuleId(**POLICYRULE_ID)
+    policyrule_uuid = policyrule_get_uuid(policyrule_id, allow_random=False)
+
+    with pytest.raises(grpc.RpcError) as e:
+        context_client.GetPolicyRule(policyrule_id)
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    MSG = 'PolicyRule({:s}) not found; policyrule_uuid generated was: {:s}'
+    assert e.value.details() == MSG.format(POLICYRULE_NAME, policyrule_uuid)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client.ListPolicyRuleIds(Empty())
+    assert len(response.policyRuleIdList) == 0
+
+    response = context_client.ListPolicyRules(Empty())
+    assert len(response.policyRules) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    response = context_client.SetPolicyRule(PolicyRule(**POLICYRULE))
+    assert response.uuid.uuid == policyrule_uuid
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client.GetPolicyRule(PolicyRuleId(**POLICYRULE_ID))
+    assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == policyrule_uuid
+    assert response.device.policyRuleBasic.priority == 1
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client.ListPolicyRuleIds(Empty())
+    assert len(response.policyRuleIdList) == 1
+    assert response.policyRuleIdList[0].uuid.uuid == policyrule_uuid
+
+    response = context_client.ListPolicyRules(Empty())
+    assert len(response.policyRules) == 1
+    assert response.policyRules[0].device.policyRuleBasic.policyRuleId.uuid.uuid == policyrule_uuid
+    assert response.policyRules[0].device.policyRuleBasic.priority == 1
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    new_policy_priority = 100
+    POLICYRULE_UPDATED = copy.deepcopy(POLICYRULE)
+    POLICYRULE_UPDATED['device']['policyRuleBasic']['priority'] = new_policy_priority
+    response = context_client.SetPolicyRule(PolicyRule(**POLICYRULE_UPDATED))
+    assert response.uuid.uuid == policyrule_uuid
+
+    # ----- Get when the object is modified ----------------------------------------------------------------------------
+    response = context_client.GetPolicyRule(PolicyRuleId(**POLICYRULE_ID))
+    assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == policyrule_uuid
+
+    # ----- List when the object is modified ---------------------------------------------------------------------------
+    response = context_client.ListPolicyRuleIds(Empty())
+    assert len(response.policyRuleIdList) == 1
+    assert response.policyRuleIdList[0].uuid.uuid == policyrule_uuid
+
+    response = context_client.ListPolicyRules(Empty())
+    assert len(response.policyRules) == 1
+    assert response.policyRules[0].device.policyRuleBasic.policyRuleId.uuid.uuid == policyrule_uuid
+    assert response.policyRules[0].device.policyRuleBasic.priority == new_policy_priority
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client.RemovePolicyRule(PolicyRuleId(**POLICYRULE_ID))
+
+    # ----- List after deleting the object -----------------------------------------------------------------------------
+    response = context_client.ListPolicyRuleIds(Empty())
+    assert len(response.policyRuleIdList) == 0
+
+    response = context_client.ListPolicyRules(Empty())
+    assert len(response.policyRules) == 0
diff --git a/src/context/tests/test_service.py b/src/context/tests/test_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..0de7b49f2327e5bf2f63b89fa855aa327982d7e4
--- /dev/null
+++ b/src/context/tests/test_service.py
@@ -0,0 +1,238 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, pytest #, time
+from common.proto.context_pb2 import (
+    Context, ContextId, Device, DeviceId, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyId)
+#from common.proto.context_pb2 import (
+#    ContextEvent, DeviceEvent, EventTypeEnum, ServiceEvent, TopologyEvent)
+from context.client.ContextClient import ContextClient
+#from context.client.EventsCollector import EventsCollector
+from context.service.database.uuids.Service import service_get_uuid
+#from .Constants import GET_EVENTS_TIMEOUT
+from .Objects import (
+    CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, SERVICE_R1_R2,
+    SERVICE_R1_R2_ID, SERVICE_R1_R2_NAME, TOPOLOGY, TOPOLOGY_ID)
+
+@pytest.mark.depends(on=['context/tests/test_link.py::test_link'])
+def test_service(context_client : ContextClient) -> None:
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    #events_collector = EventsCollector(
+    #    context_client, log_events_received=True,
+    #    activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True,
+    #    activate_link_collector = True, activate_service_collector = True, activate_slice_collector = False,
+    #    activate_connection_collector = False)
+    #events_collector.start()
+    #time.sleep(3)
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client.SetContext(Context(**CONTEXT))
+    context_uuid = response.context_uuid.uuid
+
+    response = context_client.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    topology_uuid = response.topology_uuid.uuid
+
+    response = context_client.SetDevice(Device(**DEVICE_R1))
+    device_r1_uuid = response.device_uuid.uuid
+
+    response = context_client.SetDevice(Device(**DEVICE_R2))
+    device_r2_uuid = response.device_uuid.uuid
+
+    #events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(events[0], ContextEvent)
+    #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[0].context_id.context_uuid.uuid == context_uuid
+    #assert isinstance(events[1], TopologyEvent)
+    #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[1].topology_id.topology_uuid.uuid == topology_uuid
+    #assert isinstance(events[2], DeviceEvent)
+    #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[2].device_id.device_uuid.uuid == device_r1_uuid
+    #assert isinstance(events[3], DeviceEvent)
+    #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[3].device_id.device_uuid.uuid == device_r2_uuid
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
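+    # Services are scoped to a context, so service_get_uuid() returns the derived
+    # (context_uuid, service_uuid) pair for the given ServiceId.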
+    service_id = ServiceId(**SERVICE_R1_R2_ID)
+    context_uuid, service_uuid = service_get_uuid(service_id, allow_random=False)
+    with pytest.raises(grpc.RpcError) as e:
+        context_client.GetService(service_id)
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    MSG = 'Service({:s}/{:s}) not found; context_uuid generated was: {:s}; service_uuid generated was: {:s}'
+    assert e.value.details() == MSG.format(CONTEXT_NAME, SERVICE_R1_R2_NAME, context_uuid, service_uuid)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 1
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    response = context_client.ListServiceIds(ContextId(**CONTEXT_ID))
+    assert len(response.service_ids) == 0
+
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    assert len(response.services) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
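+    # Negative case first: a service endpoint pointing at a different context UUID
+    # must be rejected with INVALID_ARGUMENT.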
+    with pytest.raises(grpc.RpcError) as e:
+        WRONG_UUID = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
+        WRONG_SERVICE = copy.deepcopy(SERVICE_R1_R2)
+        WRONG_SERVICE['service_endpoint_ids'][0]['topology_id']['context_id']['context_uuid']['uuid'] = WRONG_UUID
+        context_client.SetService(Service(**WRONG_SERVICE))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    MSG = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid({}) is invalid; '\
+          'should be == request.service_id.context_id.context_uuid.uuid({})'
+    raw_context_uuid = service_id.context_id.context_uuid.uuid # pylint: disable=no-member
+    assert e.value.details() == MSG.format(WRONG_UUID, raw_context_uuid)
+
+    response = context_client.SetService(Service(**SERVICE_R1_R2))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.service_uuid.uuid == service_uuid
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ServiceEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert event.service_id.context_id.context_uuid.uuid == context_uuid
+    #assert event.service_id.service_uuid.uuid == service_uuid
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.name == CONTEXT_NAME
+    assert len(response.topology_ids) == 1
+    assert len(response.service_ids) == 1
+    assert response.service_ids[0].context_id.context_uuid.uuid == context_uuid
+    assert response.service_ids[0].service_uuid.uuid == service_uuid
+    assert len(response.slice_ids) == 0
+
+    response = context_client.GetService(ServiceId(**SERVICE_R1_R2_ID))
+    assert response.service_id.context_id.context_uuid.uuid == context_uuid
+    assert response.service_id.service_uuid.uuid == service_uuid
+    assert response.name == SERVICE_R1_R2_NAME
+    assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+    assert len(response.service_endpoint_ids) == 2
+    assert len(response.service_constraints) == 2
+    assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED
+    assert len(response.service_config.config_rules) == 3
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client.ListServiceIds(ContextId(**CONTEXT_ID))
+    assert len(response.service_ids) == 1
+    assert response.service_ids[0].context_id.context_uuid.uuid == context_uuid
+    assert response.service_ids[0].service_uuid.uuid == service_uuid
+
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    assert len(response.services) == 1
+    assert response.services[0].service_id.context_id.context_uuid.uuid == context_uuid
+    assert response.services[0].service_id.service_uuid.uuid == service_uuid
+    assert response.services[0].name == SERVICE_R1_R2_NAME
+    assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+    assert len(response.services[0].service_endpoint_ids) == 2
+    assert len(response.services[0].service_constraints) == 2
+    assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED
+    assert len(response.services[0].service_config.config_rules) == 3
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    new_service_name = 'new'
+    SERVICE_UPDATED = copy.deepcopy(SERVICE_R1_R2)
+    SERVICE_UPDATED['name'] = new_service_name
+    SERVICE_UPDATED['service_status']['service_status'] = ServiceStatusEnum.SERVICESTATUS_ACTIVE
+    response = context_client.SetService(Service(**SERVICE_UPDATED))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.service_uuid.uuid == service_uuid
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ServiceEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    #assert event.service_id.context_id.context_uuid.uuid == context_uuid
+    #assert event.service_id.service_uuid.uuid == service_uuid
+
+    # ----- Get when the object is modified ----------------------------------------------------------------------------
+    response = context_client.GetService(ServiceId(**SERVICE_R1_R2_ID))
+    assert response.service_id.context_id.context_uuid.uuid == context_uuid
+    assert response.service_id.service_uuid.uuid == service_uuid
+    assert response.name == new_service_name
+    assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+    assert len(response.service_endpoint_ids) == 2
+    assert len(response.service_constraints) == 2
+    assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+    assert len(response.service_config.config_rules) == 3
+
+    # ----- List when the object is modified ---------------------------------------------------------------------------
+    response = context_client.ListServiceIds(ContextId(**CONTEXT_ID))
+    assert len(response.service_ids) == 1
+    assert response.service_ids[0].context_id.context_uuid.uuid == context_uuid
+    assert response.service_ids[0].service_uuid.uuid == service_uuid
+
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    assert len(response.services) == 1
+    assert response.services[0].service_id.context_id.context_uuid.uuid == context_uuid
+    assert response.services[0].service_id.service_uuid.uuid == service_uuid
+    assert response.services[0].name == new_service_name
+    assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+    assert len(response.services[0].service_endpoint_ids) == 2
+    assert len(response.services[0].service_constraints) == 2
+    assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+    assert len(response.services[0].service_config.config_rules) == 3
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client.RemoveService(ServiceId(**SERVICE_R1_R2_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ServiceEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert event.service_id.context_id.context_uuid.uuid == context_uuid
+    #assert event.service_id.service_uuid.uuid == service_uuid
+
+    # ----- List after deleting the object -----------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 1
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    response = context_client.ListServiceIds(ContextId(**CONTEXT_ID))
+    assert len(response.service_ids) == 0
+
+    response = context_client.ListServices(ContextId(**CONTEXT_ID))
+    assert len(response.services) == 0
+
+    # ----- Clean dependencies used in the test and capture related events ---------------------------------------------
+    context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID))
+    context_client.RemoveDevice(DeviceId(**DEVICE_R2_ID))
+    context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+    context_client.RemoveContext(ContextId(**CONTEXT_ID))
+
+    #events = events_collector.get_events(block=True, count=4, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(events[0], DeviceEvent)
+    #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[0].device_id.device_uuid.uuid == device_r1_uuid
+    #assert isinstance(events[1], DeviceEvent)
+    #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[1].device_id.device_uuid.uuid == device_r2_uuid
+    #assert isinstance(events[2], TopologyEvent)
+    #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[2].topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[2].topology_id.topology_uuid.uuid == topology_uuid
+    #assert isinstance(events[3], ContextEvent)
+    #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[3].context_id.context_uuid.uuid == context_uuid
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    #events_collector.stop()
diff --git a/src/context/tests/test_slice.py b/src/context/tests/test_slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..22b2eeb89f3d0f39a9cf91a8d9b8e4220a9a1b00
--- /dev/null
+++ b/src/context/tests/test_slice.py
@@ -0,0 +1,302 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, pytest #, time
+from common.proto.context_pb2 import (
+    Context, ContextId, Device, DeviceId, Link, LinkId, Service, ServiceId, Slice, SliceId, SliceStatusEnum, Topology,
+    TopologyId)
+#from common.proto.context_pb2 import (
+#    ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent)
+from context.client.ContextClient import ContextClient
+#from context.client.EventsCollector import EventsCollector
+from context.service.database.uuids.Slice import slice_get_uuid
+#from .Constants import GET_EVENTS_TIMEOUT
+from .Objects import (
+    CONTEXT, CONTEXT_ID, CONTEXT_NAME, DEVICE_R1, DEVICE_R1_ID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R3, DEVICE_R3_ID,
+    LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R3, LINK_R1_R3_ID, LINK_R2_R3, LINK_R2_R3_ID, SERVICE_R1_R2, SERVICE_R1_R2_ID,
+    SERVICE_R2_R3, SERVICE_R2_R3_ID, SLICE_R1_R3, SLICE_R1_R3_ID, SLICE_R1_R3_NAME, TOPOLOGY, TOPOLOGY_ID)
+
+@pytest.mark.depends(on=['context/tests/test_service.py::test_service'])
+def test_slice(context_client : ContextClient) -> None:
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    #events_collector = EventsCollector(
+    #    context_client, log_events_received=True,
+    #    activate_context_collector = True, activate_topology_collector = True, activate_device_collector = True,
+    #    activate_link_collector = True, activate_service_collector = True, activate_slice_collector = True,
+    #    activate_connection_collector = False)
+    #events_collector.start()
+    #time.sleep(3)
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client.SetContext(Context(**CONTEXT))
+    context_uuid = response.context_uuid.uuid
+
+    response = context_client.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    topology_uuid = response.topology_uuid.uuid
+
+    response = context_client.SetDevice(Device(**DEVICE_R1))
+    device_r1_uuid = response.device_uuid.uuid
+
+    response = context_client.SetDevice(Device(**DEVICE_R2))
+    device_r2_uuid = response.device_uuid.uuid
+
+    response = context_client.SetDevice(Device(**DEVICE_R3))
+    device_r3_uuid = response.device_uuid.uuid
+
+    response = context_client.SetLink(Link(**LINK_R1_R2))
+    link_r1_r2_uuid = response.link_uuid.uuid
+
+    response = context_client.SetLink(Link(**LINK_R1_R3))
+    link_r1_r3_uuid = response.link_uuid.uuid
+
+    response = context_client.SetLink(Link(**LINK_R2_R3))
+    link_r2_r3_uuid = response.link_uuid.uuid
+
+    response = context_client.SetService(Service(**SERVICE_R1_R2))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    service_r1_r2_uuid = response.service_uuid.uuid
+
+    response = context_client.SetService(Service(**SERVICE_R2_R3))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    service_r2_r3_uuid = response.service_uuid.uuid
+
+    #events = events_collector.get_events(block=True, count=10, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(events[0], ContextEvent)
+    #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[0].context_id.context_uuid.uuid == context_uuid
+    #assert isinstance(events[1], TopologyEvent)
+    #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[1].topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[1].topology_id.topology_uuid.uuid == topology_uuid
+    #assert isinstance(events[2], DeviceEvent)
+    #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[2].device_id.device_uuid.uuid == device_r1_uuid
+    #assert isinstance(events[3], DeviceEvent)
+    #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[3].device_id.device_uuid.uuid == device_r2_uuid
+    #assert isinstance(events[4], DeviceEvent)
+    #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[4].device_id.device_uuid.uuid == device_r3_uuid
+    #assert isinstance(events[5], LinkEvent)
+    #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[5].link_id.link_uuid.uuid == link_r1_r2_uuid
+    #assert isinstance(events[6], LinkEvent)
+    #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[6].link_id.link_uuid.uuid == link_r1_r3_uuid
+    #assert isinstance(events[7], LinkEvent)
+    #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[7].link_id.link_uuid.uuid == link_r2_r3_uuid
+    #assert isinstance(events[8], ServiceEvent)
+    #assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[8].service_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[8].service_id.service_uuid.uuid == service_r1_r2_uuid
+    #assert isinstance(events[9], ServiceEvent)
+    #assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert events[9].service_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[9].service_id.service_uuid.uuid == service_r2_r3_uuid
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    slice_id = SliceId(**SLICE_R1_R3_ID)
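+    # pre-derive the UUIDs the Context component will assign, so the NOT_FOUND details below can be matched exactly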
+    context_uuid,slice_uuid = slice_get_uuid(slice_id, allow_random=False)
+    with pytest.raises(grpc.RpcError) as e:
+        context_client.GetSlice(slice_id)
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    MSG = 'Slice({:s}/{:s}) not found; context_uuid generated was: {:s}; slice_uuid generated was: {:s}'
+    assert e.value.details() == MSG.format(CONTEXT_NAME, SLICE_R1_R3_NAME, context_uuid, slice_uuid)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 1
+    assert len(response.service_ids) == 2
+    assert len(response.slice_ids) == 0
+
+    response = context_client.ListSliceIds(ContextId(**CONTEXT_ID))
+    assert len(response.slice_ids) == 0
+
+    response = context_client.ListSlices(ContextId(**CONTEXT_ID))
+    assert len(response.slices) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        WRONG_UUID = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
+        WRONG_SLICE = copy.deepcopy(SLICE_R1_R3)
+        WRONG_SLICE['slice_endpoint_ids'][0]['topology_id']['context_id']['context_uuid']['uuid'] = WRONG_UUID
+        context_client.SetSlice(Slice(**WRONG_SLICE))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    MSG = 'request.slice_endpoint_ids[0].topology_id.context_id.context_uuid.uuid({}) is invalid; '\
+          'should be == request.slice_id.context_id.context_uuid.uuid({})'
+    raw_context_uuid = slice_id.context_id.context_uuid.uuid # pylint: disable=no-member
+    assert e.value.details() == MSG.format(WRONG_UUID, raw_context_uuid)
+
+    response = context_client.SetSlice(Slice(**SLICE_R1_R3))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.slice_uuid.uuid == slice_uuid
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, SliceEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert event.slice_id.context_id.context_uuid.uuid == context_uuid
+    #assert event.slice_id.slice_uuid.uuid == slice_uuid
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.name == CONTEXT_NAME
+    assert len(response.topology_ids) == 1
+    assert len(response.service_ids) == 2
+    assert len(response.slice_ids) == 1
+    assert response.slice_ids[0].context_id.context_uuid.uuid == context_uuid
+    assert response.slice_ids[0].slice_uuid.uuid == slice_uuid
+
+    response = context_client.GetSlice(SliceId(**SLICE_R1_R3_ID))
+    assert response.slice_id.context_id.context_uuid.uuid == context_uuid
+    assert response.slice_id.slice_uuid.uuid == slice_uuid
+    assert response.name == SLICE_R1_R3_NAME
+    assert len(response.slice_endpoint_ids) == 2
+    assert len(response.slice_constraints) == 2
+    assert response.slice_status.slice_status == SliceStatusEnum.SLICESTATUS_PLANNED
+    assert len(response.slice_config.config_rules) == 3
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client.ListSliceIds(ContextId(**CONTEXT_ID))
+    assert len(response.slice_ids) == 1
+    assert response.slice_ids[0].context_id.context_uuid.uuid == context_uuid
+    assert response.slice_ids[0].slice_uuid.uuid == slice_uuid
+
+    response = context_client.ListSlices(ContextId(**CONTEXT_ID))
+    assert len(response.slices) == 1
+    assert response.slices[0].slice_id.context_id.context_uuid.uuid == context_uuid
+    assert response.slices[0].slice_id.slice_uuid.uuid == slice_uuid
+    assert response.slices[0].name == SLICE_R1_R3_NAME
+    assert len(response.slices[0].slice_endpoint_ids) == 2
+    assert len(response.slices[0].slice_constraints) == 2
+    assert response.slices[0].slice_status.slice_status == SliceStatusEnum.SLICESTATUS_PLANNED
+    assert len(response.slices[0].slice_config.config_rules) == 3
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    new_slice_name = 'new'
+    SLICE_UPDATED = copy.deepcopy(SLICE_R1_R3)
+    SLICE_UPDATED['name'] = new_slice_name
+    SLICE_UPDATED['slice_status']['slice_status'] = SliceStatusEnum.SLICESTATUS_ACTIVE
+    response = context_client.SetSlice(Slice(**SLICE_UPDATED))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.slice_uuid.uuid == slice_uuid
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, SliceEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    #assert event.slice_id.context_id.context_uuid.uuid == context_uuid
+    #assert event.slice_id.slice_uuid.uuid == slice_uuid
+
+    # ----- Get when the object is modified ----------------------------------------------------------------------------
+    response = context_client.GetSlice(SliceId(**SLICE_R1_R3_ID))
+    assert response.slice_id.context_id.context_uuid.uuid == context_uuid
+    assert response.slice_id.slice_uuid.uuid == slice_uuid
+    assert response.name == new_slice_name
+    assert len(response.slice_endpoint_ids) == 2
+    assert len(response.slice_constraints) == 2
+    assert response.slice_status.slice_status == SliceStatusEnum.SLICESTATUS_ACTIVE
+    assert len(response.slice_config.config_rules) == 3
+
+    # ----- List when the object is modified ---------------------------------------------------------------------------
+    response = context_client.ListSliceIds(ContextId(**CONTEXT_ID))
+    assert len(response.slice_ids) == 1
+    assert response.slice_ids[0].context_id.context_uuid.uuid == context_uuid
+    assert response.slice_ids[0].slice_uuid.uuid == slice_uuid
+
+    response = context_client.ListSlices(ContextId(**CONTEXT_ID))
+    assert len(response.slices) == 1
+    assert response.slices[0].slice_id.context_id.context_uuid.uuid == context_uuid
+    assert response.slices[0].slice_id.slice_uuid.uuid == slice_uuid
+    assert response.slices[0].name == new_slice_name
+    assert len(response.slices[0].slice_endpoint_ids) == 2
+    assert len(response.slices[0].slice_constraints) == 2
+    assert response.slices[0].slice_status.slice_status == SliceStatusEnum.SLICESTATUS_ACTIVE
+    assert len(response.slices[0].slice_config.config_rules) == 3
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client.RemoveSlice(SliceId(**SLICE_R1_R3_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, SliceEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert event.slice_id.context_id.context_uuid.uuid == context_uuid
+    #assert event.slice_id.slice_uuid.uuid == slice_uuid
+
+    # ----- List after deleting the object -----------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 1
+    assert len(response.service_ids) == 2
+    assert len(response.slice_ids) == 0
+
+    response = context_client.ListSliceIds(ContextId(**CONTEXT_ID))
+    assert len(response.slice_ids) == 0
+
+    response = context_client.ListSlices(ContextId(**CONTEXT_ID))
+    assert len(response.slices) == 0
+
+    # ----- Clean dependencies used in the test and capture related events ---------------------------------------------
+    context_client.RemoveService(ServiceId(**SERVICE_R1_R2_ID))
+    context_client.RemoveService(ServiceId(**SERVICE_R2_R3_ID))
+    context_client.RemoveLink(LinkId(**LINK_R1_R2_ID))
+    context_client.RemoveLink(LinkId(**LINK_R1_R3_ID))
+    context_client.RemoveLink(LinkId(**LINK_R2_R3_ID))
+    context_client.RemoveDevice(DeviceId(**DEVICE_R1_ID))
+    context_client.RemoveDevice(DeviceId(**DEVICE_R2_ID))
+    context_client.RemoveDevice(DeviceId(**DEVICE_R3_ID))
+    context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+    context_client.RemoveContext(ContextId(**CONTEXT_ID))
+
+    #events = events_collector.get_events(block=True, count=10, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(events[0], ServiceEvent)
+    #assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[0].service_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[0].service_id.service_uuid.uuid == service_r1_r2_uuid
+    #assert isinstance(events[1], ServiceEvent)
+    #assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[1].service_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[1].service_id.service_uuid.uuid == service_r2_r3_uuid
+    #assert isinstance(events[2], LinkEvent)
+    #assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[2].link_id.link_uuid.uuid == link_r1_r2_uuid
+    #assert isinstance(events[3], LinkEvent)
+    #assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[3].link_id.link_uuid.uuid == link_r1_r3_uuid
+    #assert isinstance(events[4], LinkEvent)
+    #assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[4].link_id.link_uuid.uuid == link_r2_r3_uuid
+    #assert isinstance(events[5], DeviceEvent)
+    #assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[5].device_id.device_uuid.uuid == device_r1_uuid
+    #assert isinstance(events[6], DeviceEvent)
+    #assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[6].device_id.device_uuid.uuid == device_r2_uuid
+    #assert isinstance(events[7], DeviceEvent)
+    #assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[7].device_id.device_uuid.uuid == device_r3_uuid
+    #assert isinstance(events[8], TopologyEvent)
+    #assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[8].topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert events[8].topology_id.topology_uuid.uuid == topology_uuid
+    #assert isinstance(events[9], ContextEvent)
+    #assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert events[9].context_id.context_uuid.uuid == context_uuid
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    #events_collector.stop()
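Each of these per-entity test modules declares its predecessor with @pytest.mark.depends, so the CRUD chain (context, topology, device, link, service, slice) runs in order and later tests are skipped when a prerequisite fails. A minimal sketch of the same chaining pattern, assuming the pytest-depends plugin is installed; file and test names here are illustrative:

    # test_chain.py -- illustrative module name
    import pytest

    def test_parent():
        assert True  # stands in for the real context checks

    @pytest.mark.depends(on=['test_chain.py::test_parent'])
    def test_child():
        # pytest-depends skips this test automatically if test_parent failed
        assert True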
diff --git a/src/context/tests/test_topology.py b/src/context/tests/test_topology.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2afd9643037bdc9c9b893fe2c55e7309db8ffe1
--- /dev/null
+++ b/src/context/tests/test_topology.py
@@ -0,0 +1,177 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, pytest #, time
+from common.proto.context_pb2 import Context, ContextId, Topology, TopologyId
+#from common.proto.context_pb2 import ContextEvent, EventTypeEnum, TopologyEvent
+from context.client.ContextClient import ContextClient
+#from context.client.EventsCollector import EventsCollector
+from context.service.database.uuids.Topology import topology_get_uuid
+#from .Constants import GET_EVENTS_TIMEOUT
+from .Objects import CONTEXT, CONTEXT_ID, CONTEXT_NAME, TOPOLOGY, TOPOLOGY_ID, TOPOLOGY_NAME
+
+@pytest.mark.depends(on=['context/tests/test_context.py::test_context'])
+def test_topology(context_client : ContextClient) -> None:
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    #events_collector = EventsCollector(
+    #    context_client, log_events_received=True,
+    #    activate_context_collector = True, activate_topology_collector = True, activate_device_collector = False,
+    #    activate_link_collector = False, activate_service_collector = False, activate_slice_collector = False,
+    #    activate_connection_collector = False)
+    #events_collector.start()
+    #time.sleep(3) # wait for the events collector to start
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client.SetContext(Context(**CONTEXT))
+    context_uuid = response.context_uuid.uuid
+
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ContextEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert event.context_id.context_uuid.uuid == context_uuid
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    topology_id = TopologyId(**TOPOLOGY_ID)
+    context_uuid,topology_uuid = topology_get_uuid(topology_id, allow_random=False)
+    with pytest.raises(grpc.RpcError) as e:
+        context_client.GetTopology(topology_id)
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    MSG = 'Topology({:s}/{:s}) not found; context_uuid generated was: {:s}; topology_uuid generated was: {:s}'
+    assert e.value.details() == MSG.format(CONTEXT_NAME, TOPOLOGY_NAME, context_uuid, topology_uuid)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 0
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    response = context_client.ListTopologyIds(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 0
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    response = context_client.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.topology_uuid.uuid == topology_uuid
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, TopologyEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    #assert event.topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert event.topology_id.topology_uuid.uuid == topology_uuid
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.name == CONTEXT_NAME
+    assert len(response.topology_ids) == 1
+    assert response.topology_ids[0].context_id.context_uuid.uuid == context_uuid
+    assert response.topology_ids[0].topology_uuid.uuid == topology_uuid
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID))
+    assert response.topology_id.context_id.context_uuid.uuid == context_uuid
+    assert response.topology_id.topology_uuid.uuid == topology_uuid
+    assert response.name == TOPOLOGY_NAME
+    assert len(response.device_ids) == 0
+    assert len(response.link_ids) == 0
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client.ListTopologyIds(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 1
+    assert response.topology_ids[0].context_id.context_uuid.uuid == context_uuid
+    assert response.topology_ids[0].topology_uuid.uuid == topology_uuid
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == 1
+    assert response.topologies[0].topology_id.context_id.context_uuid.uuid == context_uuid
+    assert response.topologies[0].topology_id.topology_uuid.uuid == topology_uuid
+    assert response.topologies[0].name == TOPOLOGY_NAME
+    assert len(response.topologies[0].device_ids) == 0
+    assert len(response.topologies[0].link_ids) == 0
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    new_topology_name = 'new'
+    TOPOLOGY_UPDATED = copy.deepcopy(TOPOLOGY)
+    TOPOLOGY_UPDATED['name'] = new_topology_name
+    response = context_client.SetTopology(Topology(**TOPOLOGY_UPDATED))
+    assert response.context_id.context_uuid.uuid == context_uuid
+    assert response.topology_uuid.uuid == topology_uuid
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, TopologyEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    #assert event.topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert event.topology_id.topology_uuid.uuid == topology_uuid
+
+    # ----- Get when the object is modified ----------------------------------------------------------------------------
+    response = context_client.GetTopology(TopologyId(**TOPOLOGY_ID))
+    assert response.topology_id.context_id.context_uuid.uuid == context_uuid
+    assert response.topology_id.topology_uuid.uuid == topology_uuid
+    assert response.name == new_topology_name
+    assert len(response.device_ids) == 0
+    assert len(response.link_ids) == 0
+
+    # ----- List when the object is modified ---------------------------------------------------------------------------
+    response = context_client.ListTopologyIds(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 1
+    assert response.topology_ids[0].context_id.context_uuid.uuid == context_uuid
+    assert response.topology_ids[0].topology_uuid.uuid == topology_uuid
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == 1
+    assert response.topologies[0].topology_id.context_id.context_uuid.uuid == context_uuid
+    assert response.topologies[0].topology_id.topology_uuid.uuid == topology_uuid
+    assert response.topologies[0].name == new_topology_name
+    assert len(response.topologies[0].device_ids) == 0
+    assert len(response.topologies[0].link_ids) == 0
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, TopologyEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert event.topology_id.context_id.context_uuid.uuid == context_uuid
+    #assert event.topology_id.topology_uuid.uuid == topology_uuid
+
+    # ----- List after deleting the object -----------------------------------------------------------------------------
+    response = context_client.GetContext(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 0
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    response = context_client.ListTopologyIds(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 0
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == 0
+
+    # ----- Clean dependencies used in the test and capture related events ---------------------------------------------
+    context_client.RemoveContext(ContextId(**CONTEXT_ID))
+
+    #event = events_collector.get_event(block=True, timeout=GET_EVENTS_TIMEOUT)
+    #assert isinstance(event, ContextEvent)
+    #assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    #assert event.context_id.context_uuid.uuid == context_uuid
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    #events_collector.stop()
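All event assertions in these new tests are commented out together with the EventsCollector. A minimal sketch of the collection pattern they rely on, assuming the streaming GetContextEvents(Empty) RPC of the Context service; the class and method names here are illustrative, not the project's EventsCollector:

    import queue, threading
    from common.proto.context_pb2 import Empty

    class SimpleEventsCollector:
        def __init__(self, context_client):
            self._events = queue.Queue()
            self._stream = context_client.GetContextEvents(Empty())
            self._thread = threading.Thread(target=self._collect, daemon=True)

        def start(self):
            self._thread.start()

        def _collect(self):
            try:
                for event in self._stream:   # blocks until the stream is cancelled
                    self._events.put(event)
            except Exception:                # raised when stop() cancels the stream
                pass

        def get_event(self, block=True, timeout=10.0):
            return self._events.get(block=block, timeout=timeout)

        def stop(self):
            self._stream.cancel()
            self._thread.join()

Buffering events in a queue on a background thread lets the test body issue blocking get_event()/get_events() calls with a timeout, exactly as the commented-out assertions above do.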
diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py
deleted file mode 100644
index 022c0472039d526e488f8a69096fae8c0edbdb48..0000000000000000000000000000000000000000
--- a/src/context/tests/test_unitary.py
+++ /dev/null
@@ -1,1421 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# pylint: disable=too-many-lines
-import copy, grpc, logging, os, pytest, requests, time, urllib
-from typing import Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, ServiceNameEnum
-from common.Settings import (
-    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name,
-    get_service_baseurl_http, get_service_port_grpc, get_service_port_http)
-from common.orm.Database import Database
-from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum
-from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
-from common.message_broker.MessageBroker import MessageBroker
-from common.proto.context_pb2 import (
-    Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId,
-    DeviceOperationalStatusEnum, Empty, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId,
-    ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId)
-from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule)
-from common.type_checkers.Assertions import (
-    validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids,
-    validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids,
-    validate_links, validate_service, validate_service_ids, validate_services, validate_topologies, validate_topology,
-    validate_topology_ids)
-from context.client.ContextClient import ContextClient
-from context.client.EventsCollector import EventsCollector
-from context.service.database.Tools import (
-    FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher)
-from context.service.grpc_server.ContextService import ContextService
-from context.service.Populate import populate
-from context.service.rest_server.RestServer import RestServer
-from context.service.rest_server.Resources import RESOURCES
-from .Objects import (
-    CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID,
-    DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2,
-    LINK_R1_R2_ID, LINK_R1_R2_UUID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3,
-    SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID,
-    POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID)
-
-LOGGER = logging.getLogger(__name__)
-LOGGER.setLevel(logging.DEBUG)
-
-LOCAL_HOST = '127.0.0.1'
-GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT)) # avoid privileged ports
-HTTP_PORT = 10000 + int(get_service_port_http(ServiceNameEnum.CONTEXT)) # avoid privileged ports
-
-os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
-os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT)
-os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT)
-
-DEFAULT_REDIS_SERVICE_HOST = LOCAL_HOST
-DEFAULT_REDIS_SERVICE_PORT = 6379
-DEFAULT_REDIS_DATABASE_ID  = 0
-
-REDIS_CONFIG = {
-    'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST),
-    'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT),
-    'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID',  DEFAULT_REDIS_DATABASE_ID ),
-}
-
-SCENARIOS = [
-    ('all_inmemory', DatabaseBackendEnum.INMEMORY, {},           MessageBrokerBackendEnum.INMEMORY, {}          )
-#    ('all_redis',    DatabaseBackendEnum.REDIS,    REDIS_CONFIG, MessageBrokerBackendEnum.REDIS,    REDIS_CONFIG),
-]
-
-@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS)
-def context_db_mb(request) -> Tuple[Database, MessageBroker]:
-    name,db_backend,db_settings,mb_backend,mb_settings = request.param
-    msg = 'Running scenario {:s} db_backend={:s}, db_settings={:s}, mb_backend={:s}, mb_settings={:s}...'
-    LOGGER.info(msg.format(str(name), str(db_backend.value), str(db_settings), str(mb_backend.value), str(mb_settings)))
-    _database = Database(get_database_backend(backend=db_backend, **db_settings))
-    _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings))
-    yield _database, _message_broker
-    _message_broker.terminate()
-
-@pytest.fixture(scope='session')
-def context_service_grpc(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name
-    _service = ContextService(context_db_mb[0], context_db_mb[1])
-    _service.start()
-    yield _service
-    _service.stop()
-
-@pytest.fixture(scope='session')
-def context_service_rest(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name
-    database = context_db_mb[0]
-    _rest_server = RestServer()
-    for endpoint_name, resource_class, resource_url in RESOURCES:
-        _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,))
-    _rest_server.start()
-    time.sleep(1) # bring time for the server to start
-    yield _rest_server
-    _rest_server.shutdown()
-    _rest_server.join()
-
-@pytest.fixture(scope='session')
-def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name
-    _client = ContextClient()
-    yield _client
-    _client.close()
-
-def do_rest_request(url : str):
-    base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
-    request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
-    LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
-    reply = requests.get(request_url)
-    LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
-    assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
-    return reply.json()
-
-
-# ----- Test gRPC methods ----------------------------------------------------------------------------------------------
-
-def test_grpc_context(
-    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-    context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
-    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client_grpc)
-    events_collector.start()
-
-    # ----- Get when the object does not exist -------------------------------------------------------------------------
-    with pytest.raises(grpc.RpcError) as e:
-        context_client_grpc.GetContext(ContextId(**CONTEXT_ID))
-    assert e.value.code() == grpc.StatusCode.NOT_FOUND
-    assert e.value.details() == 'Context({:s}) not found'.format(DEFAULT_CONTEXT_UUID)
-
-    # ----- List when the object does not exist ------------------------------------------------------------------------
-    response = context_client_grpc.ListContextIds(Empty())
-    assert len(response.context_ids) == 0
-
-    response = context_client_grpc.ListContexts(Empty())
-    assert len(response.contexts) == 0
-
-    # ----- Dump state of database before create the object ------------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
-    # ----- Create the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetContext(Context(**CONTEXT))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    with pytest.raises(grpc.RpcError) as e:
-        WRONG_TOPOLOGY_ID = copy.deepcopy(TOPOLOGY_ID)
-        WRONG_TOPOLOGY_ID['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid'
-        WRONG_CONTEXT = copy.deepcopy(CONTEXT)
-        WRONG_CONTEXT['topology_ids'].append(WRONG_TOPOLOGY_ID)
-        context_client_grpc.SetContext(Context(**WRONG_CONTEXT))
-    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
-    msg = 'request.topology_ids[0].context_id.context_uuid.uuid(wrong-context-uuid) is invalid; '\
-          'should be == request.context_id.context_uuid.uuid(admin)'
-    assert e.value.details() == msg
-
-    with pytest.raises(grpc.RpcError) as e:
-        WRONG_SERVICE_ID = copy.deepcopy(SERVICE_R1_R2_ID)
-        WRONG_SERVICE_ID['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid'
-        WRONG_CONTEXT = copy.deepcopy(CONTEXT)
-        WRONG_CONTEXT['service_ids'].append(WRONG_SERVICE_ID)
-        context_client_grpc.SetContext(Context(**WRONG_CONTEXT))
-    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
-    msg = 'request.service_ids[0].context_id.context_uuid.uuid(wrong-context-uuid) is invalid; '\
-          'should be == request.context_id.context_uuid.uuid(admin)'
-    assert e.value.details() == msg
-
-    # ----- Check create event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, ContextEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Update the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetContext(Context(**CONTEXT))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Check update event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, ContextEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Dump state of database after create/update the object ------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 2
-
-    # ----- Get when the object exists ---------------------------------------------------------------------------------
-    response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert len(response.topology_ids) == 0
-    assert len(response.service_ids) == 0
-
-    # ----- List when the object exists --------------------------------------------------------------------------------
-    response = context_client_grpc.ListContextIds(Empty())
-    assert len(response.context_ids) == 1
-    assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    response = context_client_grpc.ListContexts(Empty())
-    assert len(response.contexts) == 1
-    assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert len(response.contexts[0].topology_ids) == 0
-    assert len(response.contexts[0].service_ids) == 0
-
-    # ----- Remove the object ------------------------------------------------------------------------------------------
-    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
-
-    # ----- Check remove event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, ContextEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
-
-    # ----- Dump state of database after remove the object -------------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
-
-def test_grpc_topology(
-    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-    context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
-    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client_grpc)
-    events_collector.start()
-
-    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
-    response = context_client_grpc.SetContext(Context(**CONTEXT))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, ContextEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Get when the object does not exist -------------------------------------------------------------------------
-    with pytest.raises(grpc.RpcError) as e:
-        context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID))
-    assert e.value.code() == grpc.StatusCode.NOT_FOUND
-    assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID)
-
-    # ----- List when the object does not exist ------------------------------------------------------------------------
-    response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID))
-    assert len(response.topology_ids) == 0
-
-    response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == 0
-
-    # ----- Dump state of database before create the object ------------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 2
-
-    # ----- Create the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    CONTEXT_WITH_TOPOLOGY = copy.deepcopy(CONTEXT)
-    CONTEXT_WITH_TOPOLOGY['topology_ids'].append(TOPOLOGY_ID)
-    response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_TOPOLOGY))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Check create event -----------------------------------------------------------------------------------------
-    events = events_collector.get_events(block=True, count=2)
-
-    assert isinstance(events[0], TopologyEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    assert isinstance(events[1], ContextEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Update the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    # ----- Check update event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, TopologyEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    # ----- Dump state of database after create/update the object ------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 5
-
-    # ----- Get when the object exists ---------------------------------------------------------------------------------
-    response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID))
-    assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-    assert len(response.device_ids) == 0
-    assert len(response.link_ids) == 0
-
-    # ----- List when the object exists --------------------------------------------------------------------------------
-    response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID))
-    assert len(response.topology_ids) == 1
-    assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == 1
-    assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-    assert len(response.topologies[0].device_ids) == 0
-    assert len(response.topologies[0].link_ids) == 0
-
-    # ----- Remove the object ------------------------------------------------------------------------------------------
-    context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID))
-    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
-
-    # ----- Check remove event -----------------------------------------------------------------------------------------
-    events = events_collector.get_events(block=True, count=2)
-
-    assert isinstance(events[0], TopologyEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    assert isinstance(events[1], ContextEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
-
-    # ----- Dump state of database after remove the object -------------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
-
-def test_grpc_device(
-    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-    context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
-    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client_grpc)
-    events_collector.start()
-
-    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
-    response = context_client_grpc.SetContext(Context(**CONTEXT))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    events = events_collector.get_events(block=True, count=2)
-
-    assert isinstance(events[0], ContextEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    assert isinstance(events[1], TopologyEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    # ----- Get when the object does not exist -------------------------------------------------------------------------
-    with pytest.raises(grpc.RpcError) as e:
-        context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID))
-    assert e.value.code() == grpc.StatusCode.NOT_FOUND
-    assert e.value.details() == 'Device({:s}) not found'.format(DEVICE_R1_UUID)
-
-    # ----- List when the object does not exist ------------------------------------------------------------------------
-    response = context_client_grpc.ListDeviceIds(Empty())
-    assert len(response.device_ids) == 0
-
-    response = context_client_grpc.ListDevices(Empty())
-    assert len(response.devices) == 0
-
-    # ----- Dump state of database before create the object ------------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 5
-
-    # ----- Create the object ------------------------------------------------------------------------------------------
-    with pytest.raises(grpc.RpcError) as e:
-        WRONG_DEVICE = copy.deepcopy(DEVICE_R1)
-        WRONG_DEVICE['device_endpoints'][0]['endpoint_id']['device_id']['device_uuid']['uuid'] = 'wrong-device-uuid'
-        context_client_grpc.SetDevice(Device(**WRONG_DEVICE))
-    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
-    msg = 'request.device_endpoints[0].device_id.device_uuid.uuid(wrong-device-uuid) is invalid; '\
-          'should be == request.device_id.device_uuid.uuid({:s})'.format(DEVICE_R1_UUID)
-    assert e.value.details() == msg
-
-    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
-    assert response.device_uuid.uuid == DEVICE_R1_UUID
-
-    # ----- Check create event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, DeviceEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID
-
-    # ----- Update the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
-    assert response.device_uuid.uuid == DEVICE_R1_UUID
-
-    # ----- Check update event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, DeviceEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID
-
-    # ----- Dump state of database after create/update the object ------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 47
-
-    # ----- Get when the object exists ---------------------------------------------------------------------------------
-    response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID))
-    assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID
-    assert response.device_type == 'packet-router'
-    assert len(response.device_config.config_rules) == 3
-    assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
-    assert len(response.device_drivers) == 1
-    assert len(response.device_endpoints) == 3
-
-    # ----- List when the object exists --------------------------------------------------------------------------------
-    response = context_client_grpc.ListDeviceIds(Empty())
-    assert len(response.device_ids) == 1
-    assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID
-
-    response = context_client_grpc.ListDevices(Empty())
-    assert len(response.devices) == 1
-    assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID
-    assert response.devices[0].device_type == 'packet-router'
-    assert len(response.devices[0].device_config.config_rules) == 3
-    assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
-    assert len(response.devices[0].device_drivers) == 1
-    assert len(response.devices[0].device_endpoints) == 3
-
-    # ----- Create object relation -------------------------------------------------------------------------------------
-    TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY)
-    TOPOLOGY_WITH_DEVICE['device_ids'].append(DEVICE_R1_ID)
-    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_DEVICE))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    # ----- Check update event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, TopologyEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    # ----- Check relation was created ---------------------------------------------------------------------------------
-    response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID))
-    assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-    assert len(response.device_ids) == 1
-    assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID
-    assert len(response.link_ids) == 0
-
-    # ----- Dump state of database after creating the object relation --------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 47
-
-    # ----- Remove the object ------------------------------------------------------------------------------------------
-    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID))
-    context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID))
-    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
-
-    # ----- Check remove event -----------------------------------------------------------------------------------------
-    events = events_collector.get_events(block=True, count=3)
-
-    assert isinstance(events[0], DeviceEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID
-
-    assert isinstance(events[1], TopologyEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    assert isinstance(events[2], ContextEvent)
-    assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[2].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
-
-    # ----- Dump state of database after removing the object -----------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
-
-def test_grpc_link(
-    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-    context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
-    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client_grpc)
-    events_collector.start()
-
-    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
-    response = context_client_grpc.SetContext(Context(**CONTEXT))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
-    assert response.device_uuid.uuid == DEVICE_R1_UUID
-
-    response = context_client_grpc.SetDevice(Device(**DEVICE_R2))
-    assert response.device_uuid.uuid == DEVICE_R2_UUID
-
-    events = events_collector.get_events(block=True, count=4)
-
-    assert isinstance(events[0], ContextEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    assert isinstance(events[1], TopologyEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    assert isinstance(events[2], DeviceEvent)
-    assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID
-
-    assert isinstance(events[3], DeviceEvent)
-    assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID
-
-    # ----- Get when the object does not exist -------------------------------------------------------------------------
-    with pytest.raises(grpc.RpcError) as e:
-        context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID))
-    assert e.value.code() == grpc.StatusCode.NOT_FOUND
-    assert e.value.details() == 'Link({:s}) not found'.format(LINK_R1_R2_UUID)
-
-    # ----- List when the object does not exist ------------------------------------------------------------------------
-    response = context_client_grpc.ListLinkIds(Empty())
-    assert len(response.link_ids) == 0
-
-    response = context_client_grpc.ListLinks(Empty())
-    assert len(response.links) == 0
-
-    # ----- Dump state of database before creating the object ----------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 80
-
-    # ----- Create the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetLink(Link(**LINK_R1_R2))
-    assert response.link_uuid.uuid == LINK_R1_R2_UUID
-
-    # ----- Check create event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, LinkEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID
-
-    # ----- Update the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetLink(Link(**LINK_R1_R2))
-    assert response.link_uuid.uuid == LINK_R1_R2_UUID
-
-    # ----- Check update event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, LinkEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID
-
-    # ----- Dump state of database after creating/updating the object --------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 88
-
-    # ----- Get when the object exists ---------------------------------------------------------------------------------
-    response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID))
-    assert response.link_id.link_uuid.uuid == LINK_R1_R2_UUID
-    assert len(response.link_endpoint_ids) == 2
-
-    # ----- List when the object exists --------------------------------------------------------------------------------
-    response = context_client_grpc.ListLinkIds(Empty())
-    assert len(response.link_ids) == 1
-    assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID
-
-    response = context_client_grpc.ListLinks(Empty())
-    assert len(response.links) == 1
-    assert response.links[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID
-    assert len(response.links[0].link_endpoint_ids) == 2
-
-    # ----- Create object relation -------------------------------------------------------------------------------------
-    TOPOLOGY_WITH_LINK = copy.deepcopy(TOPOLOGY)
-    TOPOLOGY_WITH_LINK['link_ids'].append(LINK_R1_R2_ID)
-    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_LINK))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    # ----- Check update event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, TopologyEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    # ----- Check relation was created ---------------------------------------------------------------------------------
-    response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID))
-    assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-    assert len(response.device_ids) == 2
-    assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID
-    assert response.device_ids[1].device_uuid.uuid == DEVICE_R2_UUID
-    assert len(response.link_ids) == 1
-    assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID
-
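-    # ----- Dump state of database after creating the object relation --------------------------------------------------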
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 88
-
-    # ----- Remove the object ------------------------------------------------------------------------------------------
-    context_client_grpc.RemoveLink(LinkId(**LINK_R1_R2_ID))
-    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID))
-    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID))
-    context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID))
-    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
-
-    # ----- Check remove event -----------------------------------------------------------------------------------------
-    events = events_collector.get_events(block=True, count=5)
-
-    assert isinstance(events[0], LinkEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID
-
-    assert isinstance(events[1], DeviceEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID
-
-    assert isinstance(events[2], DeviceEvent)
-    assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID
-
-    assert isinstance(events[3], TopologyEvent)
-    assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    assert isinstance(events[4], ContextEvent)
-    assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
-
-    # ----- Dump state of database after removing the object -----------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
-
-def test_grpc_service(
-    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-    context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
-    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client_grpc)
-    events_collector.start()
-
-    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
-    response = context_client_grpc.SetContext(Context(**CONTEXT))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
-    assert response.device_uuid.uuid == DEVICE_R1_UUID
-
-    response = context_client_grpc.SetDevice(Device(**DEVICE_R2))
-    assert response.device_uuid.uuid == DEVICE_R2_UUID
-
-    events = events_collector.get_events(block=True, count=4)
-
-    assert isinstance(events[0], ContextEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    assert isinstance(events[1], TopologyEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    assert isinstance(events[2], DeviceEvent)
-    assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID
-
-    assert isinstance(events[3], DeviceEvent)
-    assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID
-
-    # ----- Get when the object does not exist -------------------------------------------------------------------------
-    with pytest.raises(grpc.RpcError) as e:
-        context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID))
-    assert e.value.code() == grpc.StatusCode.NOT_FOUND
-    assert e.value.details() == 'Service({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, SERVICE_R1_R2_UUID)
-
-    # ----- List when the object does not exist ------------------------------------------------------------------------
-    response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID))
-    assert len(response.service_ids) == 0
-
-    response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 0
-
-    # ----- Dump state of database before creating the object ----------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 80
-
-    # ----- Create the object ------------------------------------------------------------------------------------------
-    with pytest.raises(grpc.RpcError) as e:
-        WRONG_SERVICE = copy.deepcopy(SERVICE_R1_R2)
-        WRONG_SERVICE['service_endpoint_ids'][0]\
-            ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid'
-        context_client_grpc.SetService(Service(**WRONG_SERVICE))
-    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
-    msg = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid(wrong-context-uuid) is invalid; '\
-          'should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(DEFAULT_CONTEXT_UUID)
-    assert e.value.details() == msg
-
-    response = context_client_grpc.SetService(Service(**SERVICE_R1_R2))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.service_uuid.uuid == SERVICE_R1_R2_UUID
-
-    CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT)
-    CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID)
-    response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Check create event -----------------------------------------------------------------------------------------
-    events = events_collector.get_events(block=True, count=2)
-
-    assert isinstance(events[0], ServiceEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
-
-    assert isinstance(events[1], ContextEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Update the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetService(Service(**SERVICE_R1_R2))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.service_uuid.uuid == SERVICE_R1_R2_UUID
-
-    # ----- Check update event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, ServiceEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
-
-    # ----- Dump state of database after creating/updating the object --------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 108
-
-    # ----- Get when the object exists ---------------------------------------------------------------------------------
-    response = context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID))
-    assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
-    assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
-    assert len(response.service_endpoint_ids) == 2
-    assert len(response.service_constraints) == 2
-    assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED
-    assert len(response.service_config.config_rules) == 3
-
-    # ----- List when the object exists --------------------------------------------------------------------------------
-    response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID))
-    assert len(response.service_ids) == 1
-    assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID
-
-    response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID))
-    assert len(response.services) == 1
-    assert response.services[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.services[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
-    assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM
-    assert len(response.services[0].service_endpoint_ids) == 2
-    assert len(response.services[0].service_constraints) == 2
-    assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED
-    assert len(response.services[0].service_config.config_rules) == 3
-
-    # ----- Remove the object ------------------------------------------------------------------------------------------
-    context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID))
-    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID))
-    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID))
-    context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID))
-    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
-
-    # ----- Check remove event -----------------------------------------------------------------------------------------
-    events = events_collector.get_events(block=True, count=5)
-
-    assert isinstance(events[0], ServiceEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
-
-    assert isinstance(events[1], DeviceEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID
-
-    assert isinstance(events[2], DeviceEvent)
-    assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID
-
-    assert isinstance(events[3], TopologyEvent)
-    assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    assert isinstance(events[4], ContextEvent)
-    assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
-
-    # ----- Dump state of database after removing the object -----------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
-
-def test_grpc_connection(
-    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-    context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
-    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    events_collector = EventsCollector(context_client_grpc)
-    events_collector.start()
-
-    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
-    response = context_client_grpc.SetContext(Context(**CONTEXT))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
-    assert response.device_uuid.uuid == DEVICE_R1_UUID
-
-    response = context_client_grpc.SetDevice(Device(**DEVICE_R2))
-    assert response.device_uuid.uuid == DEVICE_R2_UUID
-
-    response = context_client_grpc.SetDevice(Device(**DEVICE_R3))
-    assert response.device_uuid.uuid == DEVICE_R3_UUID
-
-    response = context_client_grpc.SetService(Service(**SERVICE_R1_R2))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.service_uuid.uuid == SERVICE_R1_R2_UUID
-
-    CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT)
-    CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID)
-    response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    response = context_client_grpc.SetService(Service(**SERVICE_R2_R3))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.service_uuid.uuid == SERVICE_R2_R3_UUID
-
-    CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT)
-    CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R2_R3_ID)
-    response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    response = context_client_grpc.SetService(Service(**SERVICE_R1_R3))
-    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.service_uuid.uuid == SERVICE_R1_R3_UUID
-
-    CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT)
-    CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R3_ID)
-    response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE))
-    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    events = events_collector.get_events(block=True, count=11)
-
-    assert isinstance(events[0], ContextEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    assert isinstance(events[1], TopologyEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    assert isinstance(events[2], DeviceEvent)
-    assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID
-
-    assert isinstance(events[3], DeviceEvent)
-    assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID
-
-    assert isinstance(events[4], DeviceEvent)
-    assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[4].device_id.device_uuid.uuid == DEVICE_R3_UUID
-
-    assert isinstance(events[5], ServiceEvent)
-    assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[5].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[5].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
-
-    assert isinstance(events[6], ContextEvent)
-    assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert events[6].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    assert isinstance(events[7], ServiceEvent)
-    assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[7].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[7].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID
-
-    assert isinstance(events[8], ContextEvent)
-    assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    assert isinstance(events[9], ServiceEvent)
-    assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert events[9].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[9].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID
-
-    assert isinstance(events[10], ContextEvent)
-    assert events[10].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert events[10].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Get when the object does not exist -------------------------------------------------------------------------
-    with pytest.raises(grpc.RpcError) as e:
-        context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID))
-    assert e.value.code() == grpc.StatusCode.NOT_FOUND
-    assert e.value.details() == 'Connection({:s}) not found'.format(CONNECTION_R1_R3_UUID)
-
-    # ----- List when the object does not exist ------------------------------------------------------------------------
-    response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID))
-    assert len(response.connection_ids) == 0
-
-    response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID))
-    assert len(response.connections) == 0
-
-    # ----- Dump state of database before creating the object ----------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 187
-
-    # ----- Create the object ------------------------------------------------------------------------------------------
-    with pytest.raises(grpc.RpcError) as e:
-        WRONG_CONNECTION = copy.deepcopy(CONNECTION_R1_R3)
-        WRONG_CONNECTION['path_hops_endpoint_ids'][0]\
-            ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid'
-        context_client_grpc.SetConnection(Connection(**WRONG_CONNECTION))
-    assert e.value.code() == grpc.StatusCode.NOT_FOUND
-    # TODO: should we check that all endpoints belong to the same topology?
-    # TODO: should we check that the endpoints form links over the topology?
-    msg = 'EndPoint({:s}/{:s}:wrong-context-uuid/{:s}) not found'.format(
-        DEVICE_R1_UUID, WRONG_CONNECTION['path_hops_endpoint_ids'][0]['endpoint_uuid']['uuid'], DEFAULT_TOPOLOGY_UUID)
-    assert e.value.details() == msg
-
-    response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3))
-    assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID
-
-    # ----- Check create event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, ConnectionEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID
-
-    # ----- Update the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3))
-    assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID
-
-    # ----- Check update event -----------------------------------------------------------------------------------------
-    event = events_collector.get_event(block=True)
-    assert isinstance(event, ConnectionEvent)
-    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
-    assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID
-
-    # ----- Dump state of database after creating/updating the object --------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 203
-
-    # ----- Get when the object exists ---------------------------------------------------------------------------------
-    response = context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID))
-    assert response.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID
-    assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert response.service_id.service_uuid.uuid == SERVICE_R1_R3_UUID
-    assert len(response.path_hops_endpoint_ids) == 6
-    assert len(response.sub_service_ids) == 2
-
-    # ----- List when the object exists --------------------------------------------------------------------------------
-    response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID))
-    assert len(response.connection_ids) == 1
-    assert response.connection_ids[0].connection_uuid.uuid == CONNECTION_R1_R3_UUID
-
-    response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID))
-    assert len(response.connections) == 1
-    assert response.connections[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID
-    assert len(response.connections[0].path_hops_endpoint_ids) == 6
-    assert len(response.connections[0].sub_service_ids) == 2
-
-    # ----- Remove the object ------------------------------------------------------------------------------------------
-    context_client_grpc.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID))
-    context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R3_ID))
-    context_client_grpc.RemoveService(ServiceId(**SERVICE_R2_R3_ID))
-    context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID))
-    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID))
-    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID))
-    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R3_ID))
-    context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID))
-    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
-
-    # ----- Check remove event -----------------------------------------------------------------------------------------
-    events = events_collector.get_events(block=True, count=9)
-
-    assert isinstance(events[0], ConnectionEvent)
-    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID
-
-    assert isinstance(events[1], ServiceEvent)
-    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[1].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[1].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID
-
-    assert isinstance(events[2], ServiceEvent)
-    assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[2].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[2].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID
-
-    assert isinstance(events[3], ServiceEvent)
-    assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[3].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[3].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
-
-    assert isinstance(events[4], DeviceEvent)
-    assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[4].device_id.device_uuid.uuid == DEVICE_R1_UUID
-
-    assert isinstance(events[5], DeviceEvent)
-    assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[5].device_id.device_uuid.uuid == DEVICE_R2_UUID
-
-    assert isinstance(events[6], DeviceEvent)
-    assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[6].device_id.device_uuid.uuid == DEVICE_R3_UUID
-
-    assert isinstance(events[7], TopologyEvent)
-    assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[7].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-    assert events[7].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
-
-    assert isinstance(events[8], ContextEvent)
-    assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    events_collector.stop()
-
-    # ----- Dump state of database after removing the object -----------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
-
-def test_grpc_policy(
-    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
-    context_db_mb : Tuple[Database, MessageBroker]):    # pylint: disable=redefined-outer-name
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
-    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client_grpc)
-    #events_collector.start()
-
-    # ----- Get when the object does not exist -------------------------------------------------------------------------
-    POLICY_ID = 'no-uuid'
-    DEFAULT_POLICY_ID = {'uuid': {'uuid': POLICY_ID}}
-
-    with pytest.raises(grpc.RpcError) as e:
-        context_client_grpc.GetPolicyRule(PolicyRuleId(**DEFAULT_POLICY_ID))
-
-    assert e.value.code() == grpc.StatusCode.NOT_FOUND
-    assert e.value.details() == 'PolicyRule({:s}) not found'.format(POLICY_ID)
-
-    # ----- List when the object does not exist ------------------------------------------------------------------------
-    response = context_client_grpc.ListPolicyRuleIds(Empty())
-    assert len(response.policyRuleIdList) == 0
-
-    response = context_client_grpc.ListPolicyRules(Empty())
-    assert len(response.policyRules) == 0
-
-    # ----- Dump state of database before creating the object ----------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry))  # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
-    # ----- Create the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE))
-    assert response.uuid.uuid == POLICY_RULE_UUID
-
-    # ----- Check create event -----------------------------------------------------------------------------------------
-    # events = events_collector.get_events(block=True, count=1)
-    # assert isinstance(events[0], PolicyEvent)
-    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
-    # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID
-
-    # ----- Update the object ------------------------------------------------------------------------------------------
-    response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE))
-    assert response.uuid.uuid == POLICY_RULE_UUID
-
-    # ----- Dump state of database after creating/updating the object --------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 2
-
-    # ----- Get when the object exists ---------------------------------------------------------------------------------
-    response = context_client_grpc.GetPolicyRule(PolicyRuleId(**POLICY_RULE_ID))
-    assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == POLICY_RULE_UUID
-
-    # ----- List when the object exists --------------------------------------------------------------------------------
-    response = context_client_grpc.ListPolicyRuleIds(Empty())
-    assert len(response.policyRuleIdList) == 1
-    assert response.policyRuleIdList[0].uuid.uuid == POLICY_RULE_UUID
-
-    response = context_client_grpc.ListPolicyRules(Empty())
-    assert len(response.policyRules) == 1
-
-    # ----- Remove the object ------------------------------------------------------------------------------------------
-    context_client_grpc.RemovePolicyRule(PolicyRuleId(**POLICY_RULE_ID))
-
-    # ----- Check remove event -----------------------------------------------------------------------------------------
-    # events = events_collector.get_events(block=True, count=2)
-
-    # assert isinstance(events[0], PolicyEvent)
-    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
-    # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID
-
-
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    # events_collector.stop()
-
-    # ----- Dump state of database after removing the object -----------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
-
-
-# ----- Test REST API methods ------------------------------------------------------------------------------------------
-
-def test_rest_populate_database(
-    context_db_mb : Tuple[Database, MessageBroker], # pylint: disable=redefined-outer-name
-    context_service_grpc : ContextService           # pylint: disable=redefined-outer-name
-    ):
-    database = context_db_mb[0]
-    database.clear_all()
-    populate(LOCAL_HOST, GRPC_PORT)
-
-def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/context_ids')
-    validate_context_ids(reply)
-
-def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/contexts')
-    validate_contexts(reply)
-
-def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}'.format(context_uuid))
-    validate_context(reply)
-
-def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid))
-    validate_topology_ids(reply)
-
-def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid))
-    validate_topologies(reply)
-
-def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_UUID)
-    reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid))
-    validate_topology(reply, num_devices=3, num_links=3)
-
-def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid))
-    validate_service_ids(reply)
-
-def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/services'.format(context_uuid))
-    validate_services(reply)
-
-def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='')
-    reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid))
-    validate_service(reply)
-
-def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid))
-    #validate_slice_ids(reply)
-
-def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/slices'.format(context_uuid))
-    #validate_slices(reply)
-
-#def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-#    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-#    slice_uuid = urllib.parse.quote(SLICE_R1_R2_UUID, safe='')
-#    reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid))
-#    #validate_slice(reply)
-
-def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/device_ids')
-    validate_device_ids(reply)
-
-def test_rest_get_devices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/devices')
-    validate_devices(reply)
-
-def test_rest_get_device(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    device_uuid = urllib.parse.quote(DEVICE_R1_UUID, safe='')
-    reply = do_rest_request('/device/{:s}'.format(device_uuid))
-    validate_device(reply)
-
-def test_rest_get_link_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/link_ids')
-    validate_link_ids(reply)
-
-def test_rest_get_links(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/links')
-    validate_links(reply)
-
-def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    link_uuid = urllib.parse.quote(LINK_R1_R2_UUID, safe='')
-    reply = do_rest_request('/link/{:s}'.format(link_uuid))
-    validate_link(reply)
-
-def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='')
-    reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid))
-    validate_connection_ids(reply)
-
-def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='')
-    reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid))
-    validate_connections(reply)
-
-def test_rest_get_connection(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    connection_uuid = urllib.parse.quote(CONNECTION_R1_R3_UUID, safe='')
-    reply = do_rest_request('/connection/{:s}'.format(connection_uuid))
-    validate_connection(reply)
-
-def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/policyrule_ids')
-    #validate_policyrule_ids(reply)
-
-def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/policyrules')
-    #validate_policyrules(reply)
-
-#def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-#    policyrule_uuid = urllib.parse.quote(POLICYRULE_UUID, safe='')
-#    reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid))
-#    #validate_policyrule(reply)
-
-
-# ----- Test misc. Context internal tools ------------------------------------------------------------------------------
-
-def test_tools_fast_string_hasher():
-    with pytest.raises(TypeError) as e:
-        fast_hasher(27)
-    assert str(e.value) == "data(27) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'int'>"
-
-    with pytest.raises(TypeError) as e:
-        fast_hasher({27})
-    assert str(e.value) == "data({27}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>"
-
-    with pytest.raises(TypeError) as e:
-        fast_hasher({'27'})
-    assert str(e.value) == "data({'27'}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>"
-
-    with pytest.raises(TypeError) as e:
-        fast_hasher([27])
-    assert str(e.value) == "data[0](27) must be " + FASTHASHER_ITEM_ACCEPTED_FORMAT + ", found <class 'int'>"
-
-    fast_hasher('hello-world')
-    fast_hasher('hello-world'.encode('UTF-8'))
-    fast_hasher(['hello', 'world'])
-    fast_hasher(('hello', 'world'))
-    fast_hasher(['hello'.encode('UTF-8'), 'world'.encode('UTF-8')])
-    fast_hasher(('hello'.encode('UTF-8'), 'world'.encode('UTF-8')))
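
Note: every gRPC test deleted above exercises the same create/update/remove lifecycle against the Context service. The sketch below condenses that shared pattern; it uses EventsCollector and EventTypeEnum exactly as in the deleted code, but the helper itself and its parameter names (set_rpc, remove_rpc, event_cls) are illustrative and not part of the repository.

    # Condensed sketch of the lifecycle pattern repeated by the deleted tests.
    # Assumes a started EventsCollector; set_rpc/remove_rpc are bound client
    # methods such as context_client_grpc.SetDevice / RemoveDevice, and
    # event_cls is the matching event class such as DeviceEvent.
    def assert_entity_lifecycle(collector, set_rpc, remove_rpc, obj, obj_id, event_cls):
        set_rpc(obj)                            # first Set -> CREATE event
        event = collector.get_event(block=True)
        assert isinstance(event, event_cls)
        assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE

        set_rpc(obj)                            # second, identical Set -> UPDATE event
        event = collector.get_event(block=True)
        assert isinstance(event, event_cls)
        assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE

        remove_rpc(obj_id)                      # Remove -> REMOVE event
        event = collector.get_event(block=True)
        assert isinstance(event, event_cls)
        assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE

For instance, the Device case above is this pattern with set_rpc=context_client_grpc.SetDevice, remove_rpc=context_client_grpc.RemoveDevice, and event_cls=DeviceEvent.
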
diff --git a/src/device/.gitlab-ci.yml b/src/device/.gitlab-ci.yml
index 3da19e7a38c9659fb8c86afd7beaefc7e08a6d7c..b0b32ab1558be4df7c4b4edbee55d733b6784c48 100644
--- a/src/device/.gitlab-ci.yml
+++ b/src/device/.gitlab-ci.yml
@@ -39,7 +39,7 @@ build device:
       - .gitlab-ci.yml
 
 # Apply unit test to the component
-unit test device:
+unit_test device:
   variables:
     IMAGE_NAME: 'device' # name of the microservice
     IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
@@ -79,28 +79,28 @@ unit test device:
       reports:
         junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
 
-# Deployment of the service in Kubernetes Cluster
-deploy device:
-  variables:
-    IMAGE_NAME: 'device' # name of the microservice
-    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
-  stage: deploy
-  needs:
-    - unit test device
-    # - integ_test execute
-  script:
-    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
-    - kubectl version
-    - kubectl get all
-    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
-    - kubectl get all
-  # environment:
-  #   name: test
-  #   url: https://example.com
-  #   kubernetes:
-  #     namespace: test
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-      when: manual    
-    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
-      when: manual
+## Deployment of the service in Kubernetes Cluster
+#deploy device:
+#  variables:
+#    IMAGE_NAME: 'device' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit test device
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual    
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/device/requirements.in b/src/device/requirements.in
index 2b9c199c86a580b72190a9d0e74a161e567abed2..96faa95c5a7f403835049d969552436c464f4813 100644
--- a/src/device/requirements.in
+++ b/src/device/requirements.in
@@ -1,13 +1,13 @@
 anytree==2.8.0
 APScheduler==3.8.1
-fastcache==1.1.0
+#fastcache==1.1.0
 Jinja2==3.0.3
 ncclient==0.6.13
 p4runtime==1.3.0
 paramiko==2.9.2
 python-json-logger==2.0.2
 pytz==2021.3
-redis==4.1.2
+#redis==4.1.2
 requests==2.27.1
 requests-mock==1.9.3
 xmltodict==0.12.0
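
With fastcache and redis commented out above, any module that still imports them would now fail at runtime rather than at dependency resolution. A throwaway check along the following lines (illustrative only, not part of the repository) confirms the removal is complete:

    # One-off sanity check: no module under src/device should still import the
    # dependencies commented out in requirements.in above.
    import pathlib

    REMOVED = ('redis', 'fastcache')
    for path in pathlib.Path('src/device').rglob('*.py'):
        text = path.read_text(encoding='utf-8')
        for pkg in REMOVED:
            assert 'import {:s}'.format(pkg) not in text, (path, pkg)
            assert 'from {:s}'.format(pkg) not in text, (path, pkg)
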
diff --git a/src/device/service/DeviceService.py b/src/device/service/DeviceService.py
index 59134f26d3dd8c3fa0a9dddbcd1d3df298ec076a..ca165a200ec09961b10f5892107020682d8c7658 100644
--- a/src/device/service/DeviceService.py
+++ b/src/device/service/DeviceService.py
@@ -14,14 +14,11 @@
 
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
-from common.orm.backend.BackendEnum import BackendEnum
-from common.orm.Database import Database
-from common.orm.Factory import get_database_backend
 from common.proto.device_pb2_grpc import add_DeviceServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from .driver_api.DriverInstanceCache import DriverInstanceCache
 from .DeviceServiceServicerImpl import DeviceServiceServicerImpl
-from .MonitoringLoops import MonitoringLoops
+from .monitoring.MonitoringLoops import MonitoringLoops
 
 # Custom gRPC settings
 # Multiple clients might keep connections alive waiting for RPC methods to be executed.
@@ -32,9 +29,8 @@ class DeviceService(GenericGrpcService):
     def __init__(self, driver_instance_cache : DriverInstanceCache, cls_name: str = __name__) -> None:
         port = get_service_port_grpc(ServiceNameEnum.DEVICE)
         super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name)
-        database = Database(get_database_backend(backend=BackendEnum.INMEMORY))
-        self.monitoring_loops = MonitoringLoops(database)
-        self.device_servicer = DeviceServiceServicerImpl(database, driver_instance_cache, self.monitoring_loops)
+        self.monitoring_loops = MonitoringLoops()
+        self.device_servicer = DeviceServiceServicerImpl(driver_instance_cache, self.monitoring_loops)
 
     def install_servicers(self):
         self.monitoring_loops.start()
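
The hunk above removes the in-memory ORM Database from the service wiring: MonitoringLoops now manages its own state, and the servicer no longer receives a Database. A usage sketch of the resulting composition follows; the start() lifecycle call is an assumption about GenericGrpcService (only install_servicers() is visible in this diff), and driver_factory is a hypothetical argument.

    # Hypothetical wiring of the refactored DeviceService; class names are taken
    # from the hunk above, the lifecycle call is assumed.
    from device.service.DeviceService import DeviceService
    from device.service.driver_api.DriverInstanceCache import DriverInstanceCache

    def run_device_service(driver_factory):
        driver_instance_cache = DriverInstanceCache(driver_factory)  # constructor argument is assumed
        device_service = DeviceService(driver_instance_cache)        # no Database injected anymore
        device_service.start()  # assumed to invoke install_servicers(), starting MonitoringLoops
        return device_service
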
diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py
index 88f49de6fb5c07e39b7efc9d26ccba135f95c929..179b7795b541801afb1db63f1ab532253a8cc851 100644
--- a/src/device/service/DeviceServiceServicerImpl.py
+++ b/src/device/service/DeviceServiceServicerImpl.py
@@ -12,47 +12,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, json, logging, re
-from typing import Any, Dict, List, Tuple
+import grpc, logging
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
-from common.method_wrappers.ServiceExceptions import InvalidArgumentException, OperationFailedException
-from common.orm.Database import Database
-from common.orm.HighLevel import get_object, update_or_create_object
-from common.orm.backend.Tools import key_to_str
-from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceConfig, DeviceId, Empty
+from common.method_wrappers.ServiceExceptions import NotFoundException, OperationFailedException
+from common.proto.context_pb2 import Device, DeviceConfig, DeviceId, DeviceOperationalStatusEnum, Empty
 from common.proto.device_pb2 import MonitoringSettings
 from common.proto.device_pb2_grpc import DeviceServiceServicer
-from common.proto.kpi_sample_types_pb2 import KpiSampleType
-from common.tools.grpc.Tools import grpc_message_to_json
+from common.tools.context_queries.Device import get_device
 from common.tools.mutex_queues.MutexQueues import MutexQueues
 from context.client.ContextClient import ContextClient
-from .database.ConfigModel import (
-    ConfigModel, ConfigRuleModel, ORM_ConfigActionEnum, get_config_rules, grpc_config_rules_to_raw, update_config)
-from .database.DatabaseTools import (
-    delete_device_from_context, get_device_driver_filter_fields, sync_device_from_context, sync_device_to_context,
-    update_device_in_local_database)
-from .database.DeviceModel import DeviceModel, DriverModel
-from .database.EndPointModel import EndPointModel, EndPointMonitorModel
-from .database.KpiModel import KpiModel
-from .database.KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type
-from .database.RelationModels import EndPointMonitorKpiModel
-from .driver_api._Driver import _Driver, RESOURCE_ENDPOINTS #, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES
-from .driver_api.DriverInstanceCache import DriverInstanceCache
-from .driver_api.Tools import (
-    check_delete_errors, check_set_errors, check_subscribe_errors, check_unsubscribe_errors)
-from .MonitoringLoops import MonitoringLoops
+from device.service.Errors import ERROR_MISSING_DRIVER, ERROR_MISSING_KPI
+from .driver_api._Driver import _Driver
+from .driver_api.DriverInstanceCache import DriverInstanceCache, get_driver
+from .monitoring.MonitoringLoops import MonitoringLoops
+from .Tools import (
+    check_connect_rules, check_no_endpoints, compute_rules_to_add_delete, configure_rules, deconfigure_rules,
+    populate_config_rules, populate_endpoints, populate_initial_config_rules, subscribe_kpi, unsubscribe_kpi)
 
 LOGGER = logging.getLogger(__name__)
 
 METRICS_POOL = MetricsPool('Device', 'RPC')
 
 class DeviceServiceServicerImpl(DeviceServiceServicer):
-    def __init__(
-        self, database : Database, driver_instance_cache : DriverInstanceCache, monitoring_loops : MonitoringLoops
-    ) -> None:
+    def __init__(self, driver_instance_cache : DriverInstanceCache, monitoring_loops : MonitoringLoops) -> None:
         LOGGER.debug('Creating Servicer...')
-        self.context_client = ContextClient()
-        self.database = database
         self.driver_instance_cache = driver_instance_cache
         self.monitoring_loops = monitoring_loops
         self.mutex_queues = MutexQueues()
@@ -60,117 +43,50 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def AddDevice(self, request : Device, context : grpc.ServicerContext) -> DeviceId:
-        device_id = request.device_id
-        device_uuid = device_id.device_uuid.uuid
-
-        connection_config_rules = {}
-        unexpected_config_rules = []
-        for config_rule in request.device_config.config_rules:
-            if (config_rule.action == ConfigActionEnum.CONFIGACTION_SET) and \
-               (config_rule.WhichOneof('config_rule') == 'custom') and \
-               (config_rule.custom.resource_key.startswith('_connect/')):
-                connection_config_rules[
-                    config_rule.custom.resource_key.replace('_connect/', '')
-                ] = config_rule.custom.resource_value
-            else:
-                unexpected_config_rules.append(config_rule)
-        if len(unexpected_config_rules) > 0:
-            unexpected_config_rules = grpc_message_to_json(request.device_config)
-            unexpected_config_rules = unexpected_config_rules['config_rules']
-            unexpected_config_rules = list(filter(
-                lambda cr: cr.get('custom', {})['resource_key'].replace('_connect/', '') not in connection_config_rules,
-                unexpected_config_rules))
-            str_unexpected_config_rules = json.dumps(unexpected_config_rules, sort_keys=True)
-            raise InvalidArgumentException(
-                'device.device_config.config_rules', str_unexpected_config_rules,
-                extra_details='RPC method AddDevice only accepts connection Config Rules that should start '\
-                              'with "_connect/" tag. Others should be configured after adding the device.')
-
-        if len(request.device_endpoints) > 0:
-            unexpected_endpoints = []
-            for device_endpoint in request.device_endpoints:
-                unexpected_endpoints.append(grpc_message_to_json(device_endpoint))
-            str_unexpected_endpoints = json.dumps(unexpected_endpoints, sort_keys=True)
-            raise InvalidArgumentException(
-                'device.device_endpoints', str_unexpected_endpoints,
-                extra_details='RPC method AddDevice does not accept Endpoints. Endpoints are discovered through '\
-                              'interrogation of the physical device.')
-
-        # Remove device configuration
-        json_request = grpc_message_to_json(request, use_integers_for_enums=True)
-        json_request['device_config'] = {}
-        request = Device(**json_request)
+        device_uuid = request.device_id.device_uuid.uuid
+
+        connection_config_rules = check_connect_rules(request.device_config)
+        check_no_endpoints(request.device_endpoints)
+
+        context_client = ContextClient()
+        device = get_device(context_client, device_uuid, rw_copy=True)
+        if device is None:
+            # not in context, create blank one to get UUID, and populate it below
+            device = Device()
+            device.device_id.CopyFrom(request.device_id)            # pylint: disable=no-member
+            device.name = request.name
+            device.device_type = request.device_type
+            device.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED
+            device.device_drivers.extend(request.device_drivers)    # pylint: disable=no-member
+            device.device_config.CopyFrom(request.device_config)    # pylint: disable=no-member
+            device_id = context_client.SetDevice(device)
+            device = get_device(context_client, device_id.device_uuid.uuid, rw_copy=True)
+
+        # update device_uuid to honor UUID provided by Context
+        device_uuid = device.device_id.device_uuid.uuid
 
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
-            sync_device_from_context(device_uuid, self.context_client, self.database)
-            db_device,_ = update_device_in_local_database(self.database, request)
-
-            driver_filter_fields = get_device_driver_filter_fields(db_device)
-
-            #LOGGER.info('[AddDevice] connection_config_rules = {:s}'.format(str(connection_config_rules)))
-            address  = connection_config_rules.pop('address', None)
-            port     = connection_config_rules.pop('port', None)
-            settings = connection_config_rules.pop('settings', '{}')
-            try:
-                settings = json.loads(settings)
-            except ValueError as e:
-                raise InvalidArgumentException(
-                    'device.device_config.config_rules[settings]', settings,
-                    extra_details='_connect/settings Config Rules provided cannot be decoded as JSON dictionary.') from e
-            driver : _Driver = self.driver_instance_cache.get(
-                device_uuid, filter_fields=driver_filter_fields, address=address, port=port, settings=settings)
-            driver.Connect()
-
-            endpoints = driver.GetConfig([RESOURCE_ENDPOINTS])
-            try:
-                for resource_key, resource_value in endpoints:
-                    if isinstance(resource_value, Exception):
-                        LOGGER.error('Error retrieving "{:s}": {:s}'.format(str(RESOURCE_ENDPOINTS), str(resource_value)))
-                        continue
-                    endpoint_uuid = resource_value.get('uuid')
-                    endpoint_type = resource_value.get('type')
-                    str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
-                    db_endpoint, _ = update_or_create_object(
-                        self.database, EndPointModel, str_endpoint_key, {
-                        'device_fk'    : db_device,
-                        'endpoint_uuid': endpoint_uuid,
-                        'endpoint_type': endpoint_type,
-                        'resource_key' : resource_key,
-                    })
-                    sample_types : Dict[int, str] = resource_value.get('sample_types', {})
-                    for sample_type, monitor_resource_key in sample_types.items():
-                        str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
-                        update_or_create_object(self.database, EndPointMonitorModel, str_endpoint_monitor_key, {
-                            'endpoint_fk'    : db_endpoint,
-                            'resource_key'   : monitor_resource_key,
-                            'kpi_sample_type': grpc_to_enum__kpi_sample_type(sample_type),
-                        })
-            except: # pylint: disable=bare-except
-                LOGGER.exception('[AddDevice] endpoints = {:s}'.format(str(endpoints)))
-
-            raw_running_config_rules = driver.GetConfig()
-            running_config_rules = []
-            for resource_key, resource_value in raw_running_config_rules:
-                if isinstance(resource_value, Exception):
-                    msg = 'Error retrieving config rules: {:s} => {:s}'
-                    LOGGER.error(msg.format(str(resource_key), str(resource_value)))
-                    continue
-                config_rule = (ORM_ConfigActionEnum.SET, resource_key, json.dumps(resource_value, sort_keys=True))
-                running_config_rules.append(config_rule)
-
-            #for running_config_rule in running_config_rules:
-            #    LOGGER.info('[AddDevice] running_config_rule: {:s}'.format(str(running_config_rule)))
-            update_config(self.database, device_uuid, 'running', running_config_rules)
-
-            initial_config_rules = driver.GetInitialConfig()
-            update_config(self.database, device_uuid, 'initial', initial_config_rules)
-
-            #LOGGER.info('[AddDevice] db_device = {:s}'.format(str(db_device.dump(
-            #    include_config_rules=True, include_drivers=True, include_endpoints=True))))
-
-            sync_device_to_context(db_device, self.context_client)
-            return DeviceId(**db_device.dump_id())
+            driver : _Driver = get_driver(self.driver_instance_cache, device)
+
+            errors = []
+
+            if len(device.device_endpoints) == 0:
+                # created from request, populate endpoints using driver
+                errors.extend(populate_endpoints(device, driver, self.monitoring_loops))
+
+            if len(device.device_config.config_rules) == len(connection_config_rules):
+                # created from request, populate config rules using driver
+                errors.extend(populate_config_rules(device, driver))
+
+            # TODO: populate components
+
+            if len(errors) > 0:
+                for error in errors: LOGGER.error(error)
+                raise OperationFailedException('AddDevice', extra_details=errors)
+
+            device_id = context_client.SetDevice(device)
+            return device_id
         finally:
             self.mutex_queues.signal_done(device_uuid)
 
@@ -181,107 +97,50 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
 
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
-            sync_device_from_context(device_uuid, self.context_client, self.database)
-
-            context_config_rules = get_config_rules(self.database, device_uuid, 'running')
-            context_config_rules = {config_rule[1]: config_rule[2] for config_rule in context_config_rules}
-            #LOGGER.info('[ConfigureDevice] context_config_rules = {:s}'.format(str(context_config_rules)))
-
-            db_device,_ = update_device_in_local_database(self.database, request)
-
-            request_config_rules = grpc_config_rules_to_raw(request.device_config.config_rules)
-            #LOGGER.info('[ConfigureDevice] request_config_rules = {:s}'.format(str(request_config_rules)))
+            context_client = ContextClient()
+            device = get_device(context_client, device_uuid, rw_copy=True)
+            if device is None:
+                raise NotFoundException('Device', device_uuid, extra_details='loading in ConfigureDevice')
 
-            resources_to_set    : List[Tuple[str, Any]] = [] # key, value
-            resources_to_delete : List[Tuple[str, Any]] = [] # key, value
-
-            for config_rule in request_config_rules:
-                action, key, value = config_rule
-                if action == ORM_ConfigActionEnum.SET:
-                    if (key not in context_config_rules) or (context_config_rules[key] != value):
-                        resources_to_set.append((key, value))
-                elif action == ORM_ConfigActionEnum.DELETE:
-                    if key in context_config_rules:
-                        resources_to_delete.append((key, value))
+            driver : _Driver = self.driver_instance_cache.get(device_uuid)
+            if driver is None:
+                msg = ERROR_MISSING_DRIVER.format(str(device_uuid))
+                raise OperationFailedException('ConfigureDevice', extra_details=msg)
 
-            #LOGGER.info('[ConfigureDevice] resources_to_set = {:s}'.format(str(resources_to_set)))
-            #LOGGER.info('[ConfigureDevice] resources_to_delete = {:s}'.format(str(resources_to_delete)))
+            if request.device_operational_status != DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED:
+                device.device_operational_status = request.device_operational_status
 
             # TODO: use of datastores (might be virtual ones) to enable rollbacks
+            resources_to_set, resources_to_delete = compute_rules_to_add_delete(device, request)
 
             errors = []
-
-            driver : _Driver = self.driver_instance_cache.get(device_uuid)
-            if driver is None:
-                errors.append('Device({:s}) has not been added to this Device instance'.format(str(device_uuid)))
-
-            if len(errors) == 0:
-                results_setconfig = driver.SetConfig(resources_to_set)
-                errors.extend(check_set_errors(resources_to_set, results_setconfig))
-
-            if len(errors) == 0:
-                results_deleteconfig = driver.DeleteConfig(resources_to_delete)
-                errors.extend(check_delete_errors(resources_to_delete, results_deleteconfig))
+            errors.extend(configure_rules(device, driver, resources_to_set))
+            errors.extend(deconfigure_rules(device, driver, resources_to_delete))
 
             if len(errors) > 0:
+                for error in errors: LOGGER.error(error)
                 raise OperationFailedException('ConfigureDevice', extra_details=errors)
 
-            running_config_rules = driver.GetConfig()
-            running_config_rules = [
-                (ORM_ConfigActionEnum.SET, config_rule[0], json.dumps(config_rule[1], sort_keys=True))
-                for config_rule in running_config_rules if not isinstance(config_rule[1], Exception)
-            ]
-            #for running_config_rule in running_config_rules:
-            #    LOGGER.info('[ConfigureDevice] running_config_rule: {:s}'.format(str(running_config_rule)))
-            update_config(self.database, device_uuid, 'running', running_config_rules)
-
-            sync_device_to_context(db_device, self.context_client)
-            return DeviceId(**db_device.dump_id())
+            # Rules were already applied by configure_rules()/deconfigure_rules(); discard the
+            # request rules and re-read the running configuration from the driver so Context
+            # stores the actual device state. This refresh may be removed if unnecessary.
+            del device.device_config.config_rules[:]
+            populate_config_rules(device, driver)
+
+            device_id = context_client.SetDevice(device)
+            return device_id
         finally:
             self.mutex_queues.signal_done(device_uuid)
 
-
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def DeleteDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty:
         device_uuid = request.device_uuid.uuid
 
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
-            self.monitoring_loops.remove(device_uuid)
-
-            sync_device_from_context(device_uuid, self.context_client, self.database)
-            db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
-            if db_device is None: return Empty()
-
+            context_client = ContextClient()
+            self.monitoring_loops.remove_device(device_uuid)
             self.driver_instance_cache.delete(device_uuid)
-            delete_device_from_context(db_device, self.context_client)
-
-            for db_kpi_pk,_ in db_device.references(KpiModel):
-                db_kpi = get_object(self.database, KpiModel, db_kpi_pk)
-                for db_endpoint_monitor_kpi_pk,_ in db_kpi.references(EndPointMonitorKpiModel):
-                    get_object(self.database, EndPointMonitorKpiModel, db_endpoint_monitor_kpi_pk).delete()
-                db_kpi.delete()
-
-            for db_endpoint_pk,_ in db_device.references(EndPointModel):
-                db_endpoint = EndPointModel(self.database, db_endpoint_pk)
-                for db_endpoint_monitor_pk,_ in db_endpoint.references(EndPointMonitorModel):
-                    get_object(self.database, EndPointMonitorModel, db_endpoint_monitor_pk).delete()
-                db_endpoint.delete()
-
-            for db_driver_pk,_ in db_device.references(DriverModel):
-                get_object(self.database, DriverModel, db_driver_pk).delete()
-
-            db_initial_config = ConfigModel(self.database, db_device.device_initial_config_fk)
-            for db_config_rule_pk,_ in db_initial_config.references(ConfigRuleModel):
-                get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete()
-
-            db_running_config = ConfigModel(self.database, db_device.device_running_config_fk)
-            for db_config_rule_pk,_ in db_running_config.references(ConfigRuleModel):
-                get_object(self.database, ConfigRuleModel, db_config_rule_pk).delete()
-
-            db_device.delete()
-            db_initial_config.delete()
-            db_running_config.delete()
+            context_client.RemoveDevice(request)
             return Empty()
         finally:
             self.mutex_queues.signal_done(device_uuid)
@@ -292,177 +151,48 @@ class DeviceServiceServicerImpl(DeviceServiceServicer):
 
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
-            sync_device_from_context(device_uuid, self.context_client, self.database)
-            db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
+            driver : _Driver = self.driver_instance_cache.get(device_uuid)
+            if driver is None:
+                msg = ERROR_MISSING_DRIVER.format(str(device_uuid))
+                raise OperationFailedException('GetInitialConfig', extra_details=msg)
+
+            device_config = DeviceConfig()
+            errors = populate_initial_config_rules(device_uuid, device_config, driver)
+
+            if len(errors) > 0:
+                for error in errors: LOGGER.error(error)
+                raise OperationFailedException('GetInitialConfig', extra_details=errors)
 
-            config_rules = {} if db_device is None else db_device.dump_initial_config()
-            device_config = DeviceConfig(config_rules=config_rules)
             return device_config
         finally:
             self.mutex_queues.signal_done(device_uuid)
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def MonitorDeviceKpi(self, request : MonitoringSettings, context : grpc.ServicerContext) -> Empty:
-        kpi_uuid = request.kpi_id.kpi_id.uuid
-        device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid
+        subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0)
+        manage_kpi_method = subscribe_kpi if subscribe else unsubscribe_kpi
+
+        if subscribe:
+            device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid
+        else:
+            # unsubscribe only carries kpi_uuid; take device_uuid from recorded KPIs
+            kpi_uuid = request.kpi_id.kpi_id.uuid
+            kpi_details = self.monitoring_loops.get_kpi_by_uuid(kpi_uuid)
+            if kpi_details is None:
+                msg = ERROR_MISSING_KPI.format(str(kpi_uuid))
+                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+            device_uuid = kpi_details[0]
+
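+        # Serialize all RPCs that touch the same device: MutexQueues executes them one at a
+        # time per device_uuid.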
         self.mutex_queues.wait_my_turn(device_uuid)
         try:
-            subscribe = (request.sampling_duration_s > 0.0) and (request.sampling_interval_s > 0.0)
-            if subscribe:
-                db_device : DeviceModel = get_object(self.database, DeviceModel, device_uuid, raise_if_not_found=False)
-                if db_device is None:
-                    msg = 'Device({:s}) has not been added to this Device instance.'.format(str(device_uuid))
-                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-                endpoint_id = request.kpi_descriptor.endpoint_id
-                endpoint_uuid = endpoint_id.endpoint_uuid.uuid
-                str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-                endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid
-                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                    str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-                    str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-                db_endpoint : EndPointModel = get_object(
-                    self.database, EndPointModel, str_endpoint_key, raise_if_not_found=False)
-                if db_endpoint is None:
-                    msg = 'Device({:s})/EndPoint({:s}) not found. EndPointKey({:s})'.format(
-                        str(device_uuid), str(endpoint_uuid), str(str_endpoint_key))
-                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-                driver : _Driver = self.driver_instance_cache.get(device_uuid)
-                if driver is None:
-                    msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid))
-                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-                sample_type = request.kpi_descriptor.kpi_sample_type
-
-                attributes = {
-                    'kpi_uuid'         : request.kpi_id.kpi_id.uuid,
-                    'kpi_description'  : request.kpi_descriptor.kpi_description,
-                    'kpi_sample_type'  : grpc_to_enum__kpi_sample_type(sample_type),
-                    'device_fk'        : db_device,
-                    'endpoint_fk'      : db_endpoint,
-                    'sampling_duration': request.sampling_duration_s,
-                    'sampling_interval': request.sampling_interval_s,
-                }
-                result : Tuple[KpiModel, bool] = update_or_create_object(self.database, KpiModel, kpi_uuid, attributes)
-                db_kpi, updated = result
-
-                str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
-                db_endpoint_monitor : EndPointMonitorModel = get_object(
-                    self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False)
-                if db_endpoint_monitor is None:
-                    msg = 'SampleType({:s}/{:s}) not supported for Device({:s})/EndPoint({:s}).'.format(
-                        str(sample_type), str(KpiSampleType.Name(sample_type).upper().replace('KPISAMPLETYPE_', '')),
-                        str(device_uuid), str(endpoint_uuid))
-                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-                endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key)
-                str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
-                attributes = {
-                    'endpoint_monitor_fk': db_endpoint_monitor,
-                    'kpi_fk'             : db_kpi,
-                }
-                result : Tuple[EndPointMonitorKpiModel, bool] = update_or_create_object(
-                    self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, attributes)
-                db_endpoint_monitor_kpi, updated = result
-
-                resources_to_subscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval
-                resources_to_subscribe.append(
-                    (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval))
-                results_subscribestate = driver.SubscribeState(resources_to_subscribe)
-                errors = check_subscribe_errors(resources_to_subscribe, results_subscribestate)
-                if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
-
-                self.monitoring_loops.add(device_uuid, driver)
-
-            else:
-                db_kpi : KpiModel = get_object(
-                    self.database, KpiModel, kpi_uuid, raise_if_not_found=False)
-                if db_kpi is None:
-                    msg = 'Kpi({:s}) not found'.format(str(kpi_uuid))
-                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-                db_device : DeviceModel = get_object(
-                    self.database, DeviceModel, db_kpi.device_fk, raise_if_not_found=False)
-                if db_device is None:
-                    msg = 'Device({:s}) not found'.format(str(db_kpi.device_fk))
-                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-                device_uuid = db_device.device_uuid
-
-                db_endpoint : EndPointModel = get_object(
-                    self.database, EndPointModel, db_kpi.endpoint_fk, raise_if_not_found=False)
-                if db_endpoint is None:
-                    msg = 'EndPoint({:s}) not found'.format(str(db_kpi.endpoint_fk))
-                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-                endpoint_uuid = db_endpoint.endpoint_uuid
-                str_endpoint_key = db_endpoint.pk
-
-                kpi_sample_type : ORM_KpiSampleTypeEnum = db_kpi.kpi_sample_type
-                sample_type = kpi_sample_type.value
-                str_endpoint_monitor_key = key_to_str([str_endpoint_key, str(sample_type)])
-                db_endpoint_monitor : EndPointMonitorModel = get_object(
-                    self.database, EndPointMonitorModel, str_endpoint_monitor_key, raise_if_not_found=False)
-                if db_endpoint_monitor is None:
-                    msg = 'EndPointMonitor({:s}) not found.'.format(str(str_endpoint_monitor_key))
-                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-                endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', db_endpoint_monitor.resource_key)
-                str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
-                db_endpoint_monitor_kpi : EndPointMonitorKpiModel = get_object(
-                    self.database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, raise_if_not_found=False)
-                if db_endpoint_monitor_kpi is None:
-                    msg = 'EndPointMonitorKpi({:s}) not found.'.format(str(str_endpoint_monitor_kpi_key))
-                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-                resources_to_unsubscribe : List[Tuple[str, float, float]] = [] # key, sampling_duration, sampling_interval
-                resources_to_unsubscribe.append(
-                    (db_endpoint_monitor.resource_key, db_kpi.sampling_duration, db_kpi.sampling_interval))
-
-                driver : _Driver = self.driver_instance_cache.get(device_uuid)
-                if driver is None:
-                    msg = 'Device({:s}) has not been added to this Device instance'.format(str(device_uuid))
-                    raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
-
-                results_unsubscribestate = driver.UnsubscribeState(resources_to_unsubscribe)
-                errors = check_unsubscribe_errors(resources_to_unsubscribe, results_unsubscribestate)
-                if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
-
-                db_endpoint_monitor_kpi.delete()
-                db_kpi.delete()
-
-                # There is one monitoring loop per device; keep them active since they are re-used by different monitoring
-                # requests.
-                #self.monitoring_loops.remove(device_uuid)
-
-            # Subscriptions are not stored as classical driver config.
-            # TODO: consider adding it somehow in the configuration.
-            # Warning: GetConfig might be very slow in OpenConfig devices
-            #running_config_rules = [
-            #    (config_rule[0], json.dumps(config_rule[1], sort_keys=True))
-            #    for config_rule in driver.GetConfig()
-            #]
-            #context_config_rules = {
-            #    config_rule[1]: config_rule[2]
-            #    for config_rule in get_config_rules(self.database, device_uuid, 'running')
-            #}
-
-            ## each in context, not in running => delete in context
-            ## each in running, not in context => add to context
-            ## each in context and in running, context.value != running.value => update in context
-            #running_config_rules_actions : List[Tuple[ORM_ConfigActionEnum, str, str]] = []
-            #for config_rule_key,config_rule_value in running_config_rules:
-            #    running_config_rules_actions.append((ORM_ConfigActionEnum.SET, config_rule_key, config_rule_value))
-            #    context_config_rules.pop(config_rule_key, None)
-            #for context_rule_key,context_rule_value in context_config_rules.items():
-            #    running_config_rules_actions.append((ORM_ConfigActionEnum.DELETE, context_rule_key, context_rule_value))
-
-            ##msg = '[MonitorDeviceKpi] running_config_rules_action[{:d}]: {:s}'
-            ##for i,running_config_rules_action in enumerate(running_config_rules_actions):
-            ##    LOGGER.info(msg.format(i, str(running_config_rules_action)))
-            #update_config(self.database, device_uuid, 'running', running_config_rules_actions)
-
-            sync_device_to_context(db_device, self.context_client)
+            driver : _Driver = self.driver_instance_cache.get(device_uuid)
+            if driver is None:
+                msg = ERROR_MISSING_DRIVER.format(str(device_uuid))
+                raise OperationFailedException('MonitorDeviceKpi', extra_details=msg)
+
+            errors = manage_kpi_method(request, driver, self.monitoring_loops)
+            if len(errors) > 0: raise OperationFailedException('MonitorDeviceKpi', extra_details=errors)
+
             return Empty()
         finally:
             self.mutex_queues.signal_done(device_uuid)
diff --git a/src/device/service/Errors.py b/src/device/service/Errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f2fc499619debef254515f8eb198f339d5930ab
--- /dev/null
+++ b/src/device/service/Errors.py
@@ -0,0 +1,30 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ERROR_MISSING_DRIVER = 'Device({:s}) has not been added to this Device instance'
+ERROR_MISSING_KPI    = 'Kpi({:s}) not found'
+
+ERROR_BAD_ENDPOINT   = 'Device({:s}): GetConfig retrieved malformed Endpoint({:s})'
+
+ERROR_GET            = 'Device({:s}): Unable to Get resource(key={:s}); error({:s})'
+ERROR_GET_INIT       = 'Device({:s}): Unable to Get Initial resource(key={:s}); error({:s})'
+ERROR_DELETE         = 'Device({:s}): Unable to Delete resource(key={:s}, value={:s}); error({:s})'
+ERROR_SET            = 'Device({:s}): Unable to Set resource(key={:s}, value={:s}); error({:s})'
+
+ERROR_SAMPLETYPE     = 'Device({:s})/EndPoint({:s}): SampleType({:s}/{:s}) not supported'
+
+ERROR_SUBSCRIBE      = 'Device({:s}): Unable to Subscribe subscription(key={:s}, duration={:s}, interval={:s}); '+\
+                       'error({:s})'
+ERROR_UNSUBSCRIBE    = 'Device({:s}): Unable to Unsubscribe subscription(key={:s}, duration={:s}, interval={:s}); '+\
+                       'error({:s})'
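+
+# Illustrative usage (hypothetical values): the templates above are plain str.format()
+# patterns; callers fill the '{:s}' placeholders positionally, e.g.:
+#   ERROR_MISSING_DRIVER.format(str(device_uuid))
+#   ERROR_SET.format(str(device_uuid), str(resource_key), str(resource_value), str(exception))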
diff --git a/src/device/service/MonitoringLoops.py b/src/device/service/MonitoringLoops.py
deleted file mode 100644
index 18faed0d51d8d594368a0c80ef03539a9b0c4d4e..0000000000000000000000000000000000000000
--- a/src/device/service/MonitoringLoops.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, queue, re, threading
-from datetime import datetime
-from typing import Dict
-from common.orm.Database import Database
-from common.orm.HighLevel import get_object
-from common.orm.backend.Tools import key_to_str
-from common.proto.monitoring_pb2 import Kpi
-from monitoring.client.MonitoringClient import MonitoringClient
-from .database.KpiModel import KpiModel
-from .database.RelationModels import EndPointMonitorKpiModel
-from .driver_api._Driver import _Driver
-
-LOGGER = logging.getLogger(__name__)
-QUEUE_GET_WAIT_TIMEOUT = 0.5
-
-class MonitoringLoop:
-    def __init__(self, device_uuid : str, driver : _Driver, samples_queue : queue.Queue) -> None:
-        self._device_uuid = device_uuid
-        self._driver = driver
-        self._samples_queue = samples_queue
-        self._running = threading.Event()
-        self._terminate = threading.Event()
-        self._samples_stream = self._driver.GetState(blocking=True, terminate=self._terminate)
-        self._collector_thread = threading.Thread(target=self._collect, daemon=True)
-
-    def _collect(self) -> None:
-        for sample in self._samples_stream:
-            if self._terminate.is_set(): break
-            sample = (self._device_uuid, *sample)
-            self._samples_queue.put_nowait(sample)
-
-    def start(self):
-        self._collector_thread.start()
-        self._running.set()
-
-    @property
-    def is_running(self): return self._running.is_set()
-
-    def stop(self):
-        self._terminate.set()
-        self._collector_thread.join()
-
-class MonitoringLoops:
-    def __init__(self, database : Database) -> None:
-        self._monitoring_client = MonitoringClient()
-        self._database = database
-        self._samples_queue = queue.Queue()
-        self._running = threading.Event()
-        self._terminate = threading.Event()
-        self._lock = threading.Lock()
-        self._device_uuid__to__monitoring_loop : Dict[str, MonitoringLoop] = {}
-        self._exporter_thread = threading.Thread(target=self._export, daemon=True)
-
-    def add(self, device_uuid : str, driver : _Driver) -> None:
-        with self._lock:
-            monitoring_loop = self._device_uuid__to__monitoring_loop.get(device_uuid)
-            if (monitoring_loop is not None) and monitoring_loop.is_running: return
-            monitoring_loop = MonitoringLoop(device_uuid, driver, self._samples_queue)
-            self._device_uuid__to__monitoring_loop[device_uuid] = monitoring_loop
-            monitoring_loop.start()
-
-    def remove(self, device_uuid : str) -> None:
-        with self._lock:
-            monitoring_loop = self._device_uuid__to__monitoring_loop.get(device_uuid)
-            if monitoring_loop is None: return
-            if monitoring_loop.is_running: monitoring_loop.stop()
-            self._device_uuid__to__monitoring_loop.pop(device_uuid, None)
-
-    def start(self):
-        self._exporter_thread.start()
-
-    @property
-    def is_running(self): return self._running.is_set()
-
-    def stop(self):
-        self._terminate.set()
-        self._exporter_thread.join()
-
-    def _export(self) -> None:
-        if self._database is None:
-            LOGGER.error('[MonitoringLoops:_export] Database not set. Terminating Exporter.')
-            return
-
-        self._running.set()
-        while not self._terminate.is_set():
-            try:
-                sample = self._samples_queue.get(block=True, timeout=QUEUE_GET_WAIT_TIMEOUT)
-                #LOGGER.debug('[MonitoringLoops:_export] sample={:s}'.format(str(sample)))
-            except queue.Empty:
-                continue
-
-            device_uuid, timestamp, endpoint_monitor_resource_key, value = sample
-            endpoint_monitor_resource_key = re.sub('[^A-Za-z0-9]', '.', endpoint_monitor_resource_key)
-            str_endpoint_monitor_kpi_key = key_to_str([device_uuid, endpoint_monitor_resource_key], separator=':')
-
-            #db_entries = self._database.dump()
-            #LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-            #for db_entry in db_entries:
-            #    LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-            #LOGGER.info('-----------------------------------------------------------')
-
-            db_endpoint_monitor_kpi : EndPointMonitorKpiModel = get_object(
-                self._database, EndPointMonitorKpiModel, str_endpoint_monitor_kpi_key, raise_if_not_found=False)
-            if db_endpoint_monitor_kpi is None:
-                LOGGER.warning('EndPointMonitorKpi({:s}) not found'.format(str_endpoint_monitor_kpi_key))
-                continue
-
-            str_kpi_key = db_endpoint_monitor_kpi.kpi_fk
-            db_kpi : KpiModel = get_object(
-                self._database, KpiModel, str_kpi_key, raise_if_not_found=False)
-            if db_kpi is None:
-                LOGGER.warning('Kpi({:s}) not found'.format(str_kpi_key))
-                continue
-
-            # FIXME: uint32 used for intVal results in out of range issues. Temporarily changed to float
-            #        extend the 'kpi_value' to support long integers (uint64 / int64 / ...)
-            if isinstance(value, int):
-                kpi_value_field_name = 'int64Val'
-                kpi_value_field_cast = int
-            elif isinstance(value, float):
-                kpi_value_field_name = 'floatVal'
-                kpi_value_field_cast = float
-            elif isinstance(value, bool):
-                kpi_value_field_name = 'boolVal'
-                kpi_value_field_cast = bool
-            else:
-                kpi_value_field_name = 'stringVal'
-                kpi_value_field_cast = str
-
-            try:
-                self._monitoring_client.IncludeKpi(Kpi(**{
-                    'kpi_id'   : {'kpi_id': {'uuid': db_kpi.kpi_uuid}},
-                    'timestamp': {'timestamp': timestamp},
-                    'kpi_value': {kpi_value_field_name: kpi_value_field_cast(value)}
-                }))
-            except: # pylint: disable=bare-except
-                LOGGER.exception('Unable to format/send Kpi')
-
-        self._running.clear()
diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2cd0b48104857ac8a4525feb28a4ca480e0aec1
--- /dev/null
+++ b/src/device/service/Tools.py
@@ -0,0 +1,253 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from typing import Any, Dict, List, Tuple, Union
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+from common.method_wrappers.ServiceExceptions import InvalidArgumentException
+from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceConfig
+from common.proto.device_pb2 import MonitoringSettings
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.tools.grpc.Tools import grpc_message_to_json
+from .driver_api._Driver import _Driver, RESOURCE_ENDPOINTS
+from .monitoring.MonitoringLoops import MonitoringLoops
+from .Errors import (
+    ERROR_BAD_ENDPOINT, ERROR_DELETE, ERROR_GET, ERROR_GET_INIT, ERROR_MISSING_KPI, ERROR_SAMPLETYPE, ERROR_SET,
+    ERROR_SUBSCRIBE, ERROR_UNSUBSCRIBE)
+
+def check_connect_rules(device_config : DeviceConfig) -> Dict[str, Any]:
+    connection_config_rules = dict()
+    unexpected_config_rules = list()
+    for config_rule in device_config.config_rules:
+        is_action_set = (config_rule.action == ConfigActionEnum.CONFIGACTION_SET)
+        is_custom_rule = (config_rule.WhichOneof('config_rule') == 'custom')
+        if is_action_set and is_custom_rule and (config_rule.custom.resource_key.startswith('_connect/')):
+            connect_attribute = config_rule.custom.resource_key.replace('_connect/', '')
+            connection_config_rules[connect_attribute] = config_rule.custom.resource_value
+        else:
+            unexpected_config_rules.append(config_rule)
+
+    if len(unexpected_config_rules) > 0:
+        unexpected_config_rules = grpc_message_to_json(device_config)
+        unexpected_config_rules = unexpected_config_rules['config_rules']
+        unexpected_config_rules = list(filter(
+            lambda cr: cr.get('custom', {}).get('resource_key', '').replace('_connect/', '')
+                       not in connection_config_rules,
+            unexpected_config_rules))
+        str_unexpected_config_rules = json.dumps(unexpected_config_rules, sort_keys=True)
+        raise InvalidArgumentException(
+            'device.device_config.config_rules', str_unexpected_config_rules,
+            extra_details='RPC method AddDevice only accepts connection Config Rules, i.e., custom rules '\
+                          'whose resource_key starts with the "_connect/" prefix. Other rules should be '\
+                          'configured after the device has been added.')
+
+    return connection_config_rules
+
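+# Illustrative usage (hypothetical values): check_connect_rules() only admits SET/custom
+# rules whose resource_key carries the '_connect/' prefix; any other rule raises
+# InvalidArgumentException.
+#
+#   config = DeviceConfig()
+#   rule = config.config_rules.add()
+#   rule.action = ConfigActionEnum.CONFIGACTION_SET
+#   rule.custom.resource_key   = '_connect/address'
+#   rule.custom.resource_value = '10.0.0.1'
+#   check_connect_rules(config)   # -> {'address': '10.0.0.1'}
+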
+def get_connect_rules(device_config : DeviceConfig) -> Dict[str, Any]:
+    connect_rules = dict()
+    for config_rule in device_config.config_rules:
+        if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue
+        if config_rule.WhichOneof('config_rule') != 'custom': continue
+        if not config_rule.custom.resource_key.startswith('_connect/'): continue
+        connect_attribute = config_rule.custom.resource_key.replace('_connect/', '')
+        connect_rules[connect_attribute] = config_rule.custom.resource_value
+    return connect_rules
+
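+# Note: get_connect_rules() (above) mirrors check_connect_rules() but skips non-connection
+# rules silently instead of raising; presumably intended for devices already stored in
+# Context (e.g., when re-creating drivers at start-up).
+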
+def check_no_endpoints(device_endpoints) -> None:
+    if len(device_endpoints) == 0: return
+    unexpected_endpoints = []
+    for device_endpoint in device_endpoints:
+        unexpected_endpoints.append(grpc_message_to_json(device_endpoint))
+    str_unexpected_endpoints = json.dumps(unexpected_endpoints, sort_keys=True)
+    raise InvalidArgumentException(
+        'device.device_endpoints', str_unexpected_endpoints,
+        extra_details='RPC method AddDevice does not accept Endpoints. Endpoints are discovered through '\
+                      'interrogation of the physical device.')
+
+def populate_endpoints(device : Device, driver : _Driver, monitoring_loops : MonitoringLoops) -> List[str]:
+    device_uuid = device.device_id.device_uuid.uuid
+
+    resources_to_get = [RESOURCE_ENDPOINTS]
+    results_getconfig = driver.GetConfig(resources_to_get)
+
+    errors : List[str] = list()
+    for endpoint in results_getconfig:
+        if len(endpoint) != 2:
+            errors.append(ERROR_BAD_ENDPOINT.format(device_uuid, str(endpoint)))
+            continue
+
+        resource_key, resource_value = endpoint
+        if isinstance(resource_value, Exception):
+            errors.append(ERROR_GET.format(device_uuid, str(resource_key), str(resource_value)))
+            continue
+
+        endpoint_uuid = resource_value.get('uuid')
+
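+        # Discovered endpoints are attached to the default context and topology.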
+        device_endpoint = device.device_endpoints.add()
+        device_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
+        device_endpoint.endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME
+        device_endpoint.endpoint_id.device_id.device_uuid.uuid = device_uuid
+        device_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid
+        device_endpoint.endpoint_type = resource_value.get('type')
+
+        sample_types : Dict[int, str] = resource_value.get('sample_types', {})
+        for kpi_sample_type, monitor_resource_key in sample_types.items():
+            device_endpoint.kpi_sample_types.append(kpi_sample_type)
+            monitoring_loops.add_resource_key(device_uuid, endpoint_uuid, kpi_sample_type, monitor_resource_key)
+
+    return errors
+
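+# For reference (an assumed shape, not normative): drivers are expected to answer
+# GetConfig([RESOURCE_ENDPOINTS]) with (resource_key, resource_value) pairs such as:
+#
+#   ('/endpoints/endpoint[eth0]', {
+#       'uuid': 'eth0', 'type': 'copper',
+#       'sample_types': {KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED: '/endpoints/eth0/rx_bytes'},
+#   })
+#
+# Malformed tuples and Exception values are reported via ERROR_BAD_ENDPOINT / ERROR_GET.
+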
+def _raw_config_rules_to_grpc(
+    device_uuid : str, device_config : DeviceConfig, error_template : str, default_config_action : ConfigActionEnum,
+    raw_config_rules : List[Tuple[str, Union[Any, Exception, None]]]
+) -> List[str]:
+    errors : List[str] = list()
+
+    for resource_key, resource_value in raw_config_rules:
+        if isinstance(resource_value, Exception):
+            errors.append(error_template.format(device_uuid, str(resource_key), str(resource_value)))
+            continue
+
+        config_rule = device_config.config_rules.add()
+        config_rule.action = default_config_action
+        config_rule.custom.resource_key = resource_key
+        config_rule.custom.resource_value = \
+            resource_value if isinstance(resource_value, str) else json.dumps(resource_value, sort_keys=True)
+
+    return errors
+
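+# Minimal sketch (hypothetical values): each raw (key, value) pair coming from a driver
+# becomes one custom ConfigRule; non-string values are serialized as canonical JSON.
+#
+#   device_config = DeviceConfig()
+#   _raw_config_rules_to_grpc('dev-1', device_config, ERROR_GET,
+#                             ConfigActionEnum.CONFIGACTION_SET,
+#                             [('/interface[eth0]/mtu', 1500)])
+#   # -> one rule: action=SET, resource_key='/interface[eth0]/mtu', resource_value='1500'
+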
+def populate_config_rules(device : Device, driver : _Driver) -> List[str]:
+    device_uuid = device.device_id.device_uuid.uuid
+    results_getconfig = driver.GetConfig()
+    return _raw_config_rules_to_grpc(
+        device_uuid, device.device_config, ERROR_GET, ConfigActionEnum.CONFIGACTION_SET, results_getconfig)
+
+def populate_initial_config_rules(device_uuid : str, device_config : DeviceConfig, driver : _Driver) -> List[str]:
+    results_getinitconfig = driver.GetInitialConfig()
+    return _raw_config_rules_to_grpc(
+        device_uuid, device_config, ERROR_GET_INIT, ConfigActionEnum.CONFIGACTION_SET, results_getinitconfig)
+
+def compute_rules_to_add_delete(
+    device : Device, request : Device
+) -> Tuple[List[Tuple[str, Any]], List[Tuple[str, Any]]]:
+    # convert config rules from context into a dictionary
+    # TODO: add support for non-custom config rules
+    context_config_rules = {
+        config_rule.custom.resource_key: config_rule.custom.resource_value
+        for config_rule in device.device_config.config_rules
+        if config_rule.WhichOneof('config_rule') == 'custom'
+    }
+
+    # convert config rules from request into a list
+    # TODO: add support for non-custom config rules
+    request_config_rules = [
+        (config_rule.action, config_rule.custom.resource_key, config_rule.custom.resource_value)
+        for config_rule in request.device_config.config_rules
+        if config_rule.WhichOneof('config_rule') == 'custom'
+    ]
+
+    resources_to_set    : List[Tuple[str, Any]] = [] # key, value
+    resources_to_delete : List[Tuple[str, Any]] = [] # key, value
+
+    for action, key, value in request_config_rules:
+        if action == ConfigActionEnum.CONFIGACTION_SET:
+            if (key in context_config_rules) and (context_config_rules[key] == value): continue
+            resources_to_set.append((key, value))
+        elif action == ConfigActionEnum.CONFIGACTION_DELETE:
+            if key not in context_config_rules: continue
+            resources_to_delete.append((key, value))
+
+    return resources_to_set, resources_to_delete
+
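+# Worked example (hypothetical values): given a device whose context holds {'/x': '1'} and
+# a request carrying SET('/x', '1'), SET('/y', '2'), DELETE('/z', ''), the result is:
+#   resources_to_set    = [('/y', '2')]   # '/x' is unchanged, so it is skipped
+#   resources_to_delete = []              # '/z' is not in the context, nothing to delete
+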
+def configure_rules(device : Device, driver : _Driver, resources_to_set : List[Tuple[str, Any]]) -> List[str]:
+    device_uuid = device.device_id.device_uuid.uuid
+
+    results_setconfig = driver.SetConfig(resources_to_set)
+
+    # Pair each requested resource with its result (drivers return results positionally) and
+    # report one ERROR_SET entry per failure. No DeviceConfig is built here: ConfigureDevice
+    # re-reads the running configuration from the driver afterwards.
+    errors : List[str] = list()
+    for (resource_key, resource_value), result in zip(resources_to_set, results_setconfig):
+        if not isinstance(result, Exception): continue
+        errors.append(ERROR_SET.format(
+            str(device_uuid), str(resource_key), str(resource_value), str(result)))
+    return errors
+
+def deconfigure_rules(device : Device, driver : _Driver, resources_to_delete : List[Tuple[str, Any]]) -> List[str]:
+    device_uuid = device.device_id.device_uuid.uuid
+
+    results_deleteconfig = driver.DeleteConfig(resources_to_delete)
+
+    errors : List[str] = list()
+    for (resource_key, resource_value), result in zip(resources_to_delete, results_deleteconfig):
+        if not isinstance(result, Exception): continue
+        errors.append(ERROR_DELETE.format(
+            str(device_uuid), str(resource_key), str(resource_value), str(result)))
+    return errors
+
+def subscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_loops : MonitoringLoops) -> List[str]:
+    kpi_uuid = request.kpi_id.kpi_id.uuid
+    device_uuid = request.kpi_descriptor.device_id.device_uuid.uuid
+    endpoint_uuid = request.kpi_descriptor.endpoint_id.endpoint_uuid.uuid
+    kpi_sample_type = request.kpi_descriptor.kpi_sample_type
+
+    resource_key = monitoring_loops.get_resource_key(device_uuid, endpoint_uuid, kpi_sample_type)
+    if resource_key is None:
+        kpi_sample_type_name = KpiSampleType.Name(kpi_sample_type).upper().replace('KPISAMPLETYPE_', '')
+        return [
+            ERROR_SAMPLETYPE.format(
+                str(device_uuid), str(endpoint_uuid), str(kpi_sample_type), str(kpi_sample_type_name)
+            )
+        ]
+
+    sampling_duration = request.sampling_duration_s # seconds
+    sampling_interval = request.sampling_interval_s # seconds
+
+    resources_to_subscribe = [(resource_key, sampling_duration, sampling_interval)]
+    results_subscribestate = driver.SubscribeState(resources_to_subscribe)
+
+    errors : List[str] = list()
+    for (resource_key, duration, interval), result in zip(resources_to_subscribe, results_subscribestate):
+        if isinstance(result, Exception):
+            errors.append(ERROR_SUBSCRIBE.format(
+                str(device_uuid), str(resource_key), str(duration), str(interval), str(result)))
+
+    # Register the KPI and its per-device monitoring loop only if the subscription succeeded.
+    if len(errors) > 0: return errors
+
+    monitoring_loops.add_kpi(device_uuid, resource_key, kpi_uuid, sampling_duration, sampling_interval)
+    monitoring_loops.add_device(device_uuid, driver)
+
+    return errors
+
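+# Illustrative usage (hypothetical values): a MonitoringSettings with positive
+# sampling_duration_s and sampling_interval_s selects subscribe_kpi(); e.g. sampling a KPI
+# for 120 seconds, one sample every 5 seconds:
+#
+#   settings = MonitoringSettings()
+#   settings.kpi_id.kpi_id.uuid = 'kpi-1'
+#   settings.kpi_descriptor.device_id.device_uuid.uuid = 'dev-1'
+#   settings.kpi_descriptor.endpoint_id.endpoint_uuid.uuid = 'eth0'
+#   settings.kpi_descriptor.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED
+#   settings.sampling_duration_s = 120.0
+#   settings.sampling_interval_s = 5.0
+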
+def unsubscribe_kpi(request : MonitoringSettings, driver : _Driver, monitoring_loops : MonitoringLoops) -> List[str]:
+    kpi_uuid = request.kpi_id.kpi_id.uuid
+
+    kpi_details = monitoring_loops.get_kpi_by_uuid(kpi_uuid)
+    if kpi_details is None:
+        return [ERROR_MISSING_KPI.format(str(kpi_uuid))]
+
+    device_uuid, resource_key, sampling_duration, sampling_interval = kpi_details
+
+    resources_to_unsubscribe = [(resource_key, sampling_duration, sampling_interval)]
+    results_unsubscribestate = driver.UnsubscribeState(resources_to_unsubscribe)
+
+    errors : List[str] = list()
+    for (resource_key, duration, interval), result in zip(resources_to_unsubscribe, results_unsubscribestate):
+        if isinstance(result, Exception):
+            errors.append(ERROR_UNSUBSCRIBE.format(
+                str(device_uuid), str(resource_key), str(duration), str(interval), str(result)))
+
+    monitoring_loops.remove_kpi(kpi_uuid)
+    #monitoring_loops.remove_device(device_uuid) # Do not remove; one monitoring_loop/device used by multiple requests
+
+    return errors
diff --git a/src/device/service/__main__.py b/src/device/service/__main__.py
index 5c9b41531e7bc579cbe5cc563f20b193f6bc5a90..c69393fc3b9347b7bedfb579b67e79605f14714f 100644
--- a/src/device/service/__main__.py
+++ b/src/device/service/__main__.py
@@ -20,7 +20,7 @@ from common.Settings import (
     wait_for_environment_variables)
 from .DeviceService import DeviceService
 from .driver_api.DriverFactory import DriverFactory
-from .driver_api.DriverInstanceCache import DriverInstanceCache
+from .driver_api.DriverInstanceCache import DriverInstanceCache, preload_drivers
 from .drivers import DRIVERS
 
 terminate = threading.Event()
@@ -58,6 +58,9 @@ def main():
     driver_factory = DriverFactory(DRIVERS)
     driver_instance_cache = DriverInstanceCache(driver_factory)
 
+    # Initialize drivers with existing devices in context
+    preload_drivers(driver_instance_cache)
+
     # Starting device service
     grpc_service = DeviceService(driver_instance_cache)
     grpc_service.start()
diff --git a/src/device/service/database/ConfigModel.py b/src/device/service/database/ConfigModel.py
deleted file mode 100644
index 8472a44eaefefceaee36dcbe40d9a427eb2cbb36..0000000000000000000000000000000000000000
--- a/src/device/service/database/ConfigModel.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools, logging, operator
-from enum import Enum
-from typing import Dict, List, Tuple, Union
-from common.orm.Database import Database
-from common.orm.HighLevel import get_object, get_or_create_object, update_or_create_object
-from common.orm.backend.Tools import key_to_str
-from common.orm.fields.EnumeratedField import EnumeratedField
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.IntegerField import IntegerField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.proto.context_pb2 import ConfigActionEnum
-from common.tools.grpc.Tools import grpc_message_to_json_string
-from .Tools import fast_hasher, grpc_to_enum, remove_dict_key
-
-LOGGER = logging.getLogger(__name__)
-
-class ORM_ConfigActionEnum(Enum):
-    UNDEFINED = ConfigActionEnum.CONFIGACTION_UNDEFINED
-    SET       = ConfigActionEnum.CONFIGACTION_SET
-    DELETE    = ConfigActionEnum.CONFIGACTION_DELETE
-
-grpc_to_enum__config_action = functools.partial(
-    grpc_to_enum, ConfigActionEnum, ORM_ConfigActionEnum)
-
-class ConfigModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-
-    def dump(self) -> List[Dict]:
-        db_config_rule_pks = self.references(ConfigRuleModel)
-        config_rules = [ConfigRuleModel(self.database, pk).dump(include_position=True) for pk,_ in db_config_rule_pks]
-        config_rules = sorted(config_rules, key=operator.itemgetter('position'))
-        return [remove_dict_key(config_rule, 'position') for config_rule in config_rules]
-
-class ConfigRuleModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    config_fk = ForeignKeyField(ConfigModel)
-    position = IntegerField(min_value=0, required=True)
-    action = EnumeratedField(ORM_ConfigActionEnum, required=True)
-    key = StringField(required=True, allow_empty=False)
-    value = StringField(required=False, allow_empty=True)
-
-    def dump(self, include_position=True) -> Dict: # pylint: disable=arguments-differ
-        result = {
-            'action': self.action.value,
-            'custom': {
-                'resource_key': self.key,
-                'resource_value': self.value,
-            },
-        }
-        if include_position: result['position'] = self.position
-        return result
-
-def delete_all_config_rules(database : Database, db_parent_pk : str, config_name : str) -> None:
-    str_config_key = key_to_str([db_parent_pk, config_name], separator=':')
-    db_config : ConfigModel = get_object(database, ConfigModel, str_config_key, raise_if_not_found=False)
-    if db_config is None: return
-    db_config_rule_pks = db_config.references(ConfigRuleModel)
-    for pk,_ in db_config_rule_pks: ConfigRuleModel(database, pk).delete()
-
-def grpc_config_rules_to_raw(grpc_config_rules) -> List[Tuple[ORM_ConfigActionEnum, str, str]]:
-    def translate(grpc_config_rule):
-        action = grpc_to_enum__config_action(grpc_config_rule.action)
-        config_rule_type = str(grpc_config_rule.WhichOneof('config_rule'))
-        if config_rule_type != 'custom':
-            raise NotImplementedError('ConfigRule of type {:s} is not implemented: {:s}'.format(
-                config_rule_type, grpc_message_to_json_string(grpc_config_rule)))
-        return action, grpc_config_rule.custom.resource_key, grpc_config_rule.custom.resource_value
-    return [translate(grpc_config_rule) for grpc_config_rule in grpc_config_rules]
-
-def get_config_rules(
-    database : Database, db_parent_pk : str, config_name : str
-    ) -> List[Tuple[ORM_ConfigActionEnum, str, str]]:
-
-    str_config_key = key_to_str([db_parent_pk, config_name], separator=':')
-    db_config = get_object(database, ConfigModel, str_config_key, raise_if_not_found=False)
-    return [] if db_config is None else [
-        # pylint: disable=no-member, protected-access
-        (ORM_ConfigActionEnum._value2member_map_.get(config_rule['action']),
-            config_rule['custom']['resource_key'], config_rule['custom']['resource_value'])
-        for config_rule in db_config.dump()
-        if 'custom' in config_rule
-    ]
-
-def update_config(
-    database : Database, db_parent_pk : str, config_name : str,
-    raw_config_rules : List[Tuple[ORM_ConfigActionEnum, str, str]]
-) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]:
-
-    str_config_key = key_to_str([db_parent_pk, config_name], separator=':')
-    result : Tuple[ConfigModel, bool] = get_or_create_object(database, ConfigModel, str_config_key)
-    db_config, created = result
-
-    db_objects : List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]] = [(db_config, created)]
-
-    for position,(action, resource_key, resource_value) in enumerate(raw_config_rules):
-        str_rule_key_hash = fast_hasher(resource_key)
-        str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':')
-        result : Tuple[ConfigRuleModel, bool] = update_or_create_object(
-            database, ConfigRuleModel, str_config_rule_key, {
-                'config_fk': db_config, 'position': position, 'action': action, 'key': resource_key,
-                'value': resource_value,
-            })
-        db_config_rule, updated = result
-        db_objects.append((db_config_rule, updated))
-
-    return db_objects
diff --git a/src/device/service/database/ContextModel.py b/src/device/service/database/ContextModel.py
deleted file mode 100644
index a609e1ba9189f5359064e6628cba6c08d353770e..0000000000000000000000000000000000000000
--- a/src/device/service/database/ContextModel.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import Dict, List
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-
-LOGGER = logging.getLogger(__name__)
-
-class ContextModel(Model):
-    pk = PrimaryKeyField()
-    context_uuid = StringField(required=True, allow_empty=False)
-
-    def dump_id(self) -> Dict:
-        return {'context_uuid': {'uuid': self.context_uuid}}
-
-    def dump_topology_ids(self) -> List[Dict]:
-        from .TopologyModel import TopologyModel # pylint: disable=import-outside-toplevel
-        db_topology_pks = self.references(TopologyModel)
-        return [TopologyModel(self.database, pk).dump_id() for pk,_ in db_topology_pks]
-
-    def dump(self, include_topologies=False) -> Dict: # pylint: disable=arguments-differ
-        result = {'context_id': self.dump_id()}
-        if include_topologies: result['topology_ids'] = self.dump_topology_ids()
-        return result
diff --git a/src/device/service/database/DatabaseTools.py b/src/device/service/database/DatabaseTools.py
deleted file mode 100644
index 9d3b712cade921849a5b34be3a837e4f6697b76f..0000000000000000000000000000000000000000
--- a/src/device/service/database/DatabaseTools.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import grpc
-from typing import Any, Dict, Tuple
-from common.method_wrappers.ServiceExceptions import InvalidArgumentException
-from common.orm.Database import Database
-from common.orm.HighLevel import get_or_create_object, update_or_create_object
-from common.orm.backend.Tools import key_to_str
-from common.proto.context_pb2 import Device, DeviceId
-from context.client.ContextClient import ContextClient
-from device.service.driver_api.FilterFields import FilterFieldEnum
-from .ConfigModel import delete_all_config_rules, grpc_config_rules_to_raw, update_config
-from .ContextModel import ContextModel
-from .DeviceModel import DeviceModel, DriverModel, grpc_to_enum__device_operational_status, set_drivers
-from .EndPointModel import EndPointModel, set_endpoint_monitors
-from .TopologyModel import TopologyModel
-
-def update_device_in_local_database(database : Database, device : Device) -> Tuple[DeviceModel, bool]:
-    device_uuid = device.device_id.device_uuid.uuid
-
-    for i,endpoint in enumerate(device.device_endpoints):
-        endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
-        if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid
-        if device_uuid != endpoint_device_uuid:
-            raise InvalidArgumentException(
-                'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid,
-                ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)])
-
-    initial_config_result = update_config(database, device_uuid, 'initial', [])
-
-    config_rules = grpc_config_rules_to_raw(device.device_config.config_rules)
-    delete_all_config_rules(database, device_uuid, 'running')
-    running_config_result = update_config(database, device_uuid, 'running', config_rules)
-
-    result : Tuple[DeviceModel, bool] = update_or_create_object(database, DeviceModel, device_uuid, {
-        'device_uuid'              : device_uuid,
-        'device_type'              : device.device_type,
-        'device_operational_status': grpc_to_enum__device_operational_status(device.device_operational_status),
-        'device_initial_config_fk' : initial_config_result[0][0],
-        'device_running_config_fk' : running_config_result[0][0],
-    })
-    db_device, updated = result
-    set_drivers(database, db_device, device.device_drivers)
-
-    for i,endpoint in enumerate(device.device_endpoints):
-        endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
-        endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
-        if len(endpoint_device_uuid) == 0: endpoint_device_uuid = device_uuid
-
-        str_endpoint_key = key_to_str([device_uuid, endpoint_uuid])
-        endpoint_attributes = {
-            'device_fk'    : db_device,
-            'endpoint_uuid': endpoint_uuid,
-            'endpoint_type': endpoint.endpoint_type,
-        }
-
-        endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid
-        endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid
-        if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-            result : Tuple[ContextModel, bool] = get_or_create_object(
-                database, ContextModel, endpoint_topology_context_uuid, defaults={
-                    'context_uuid': endpoint_topology_context_uuid,
-                })
-            db_context, _ = result
-
-            str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-            result : Tuple[TopologyModel, bool] = get_or_create_object(
-                database, TopologyModel, str_topology_key, defaults={
-                    'context_fk': db_context,
-                    'topology_uuid': endpoint_topology_uuid,
-                })
-            db_topology, _ = result
-
-            str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-            endpoint_attributes['topology_fk'] = db_topology
-
-        result : Tuple[EndPointModel, bool] = update_or_create_object(
-            database, EndPointModel, str_endpoint_key, endpoint_attributes)
-        db_endpoint, db_endpoint_updated = result
-
-        set_endpoint_monitors(database, db_endpoint, endpoint.kpi_sample_types)
-
-        updated = updated or db_endpoint_updated
-
-    return db_device, updated
-
-def sync_device_from_context(
-    device_uuid : str, context_client : ContextClient, database : Database
-    ) -> Tuple[DeviceModel, bool]:
-
-    try:
-        device : Device = context_client.GetDevice(DeviceId(device_uuid={'uuid': device_uuid}))
-    except grpc.RpcError as e:
-        if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
-        return None
-    return update_device_in_local_database(database, device)
-
-def sync_device_to_context(db_device : DeviceModel, context_client : ContextClient) -> None:
-    if db_device is None: return
-    context_client.SetDevice(Device(**db_device.dump(
-        include_config_rules=True, include_drivers=True, include_endpoints=True)))
-
-def delete_device_from_context(db_device : DeviceModel, context_client : ContextClient) -> None:
-    if db_device is None: return
-    context_client.RemoveDevice(DeviceId(**db_device.dump_id()))
-
-def get_device_driver_filter_fields(db_device : DeviceModel) -> Dict[FilterFieldEnum, Any]:
-    if db_device is None: return {}
-    database = db_device.database
-    db_driver_pks = db_device.references(DriverModel)
-    db_driver_names = [DriverModel(database, pk).driver.value for pk,_ in db_driver_pks]
-    return {
-        FilterFieldEnum.DEVICE_TYPE: db_device.device_type,
-        FilterFieldEnum.DRIVER     : db_driver_names,
-    }
diff --git a/src/device/service/database/DeviceModel.py b/src/device/service/database/DeviceModel.py
deleted file mode 100644
index 9dd63d36efebf135b7bb38845d917bc9e03dc100..0000000000000000000000000000000000000000
--- a/src/device/service/database/DeviceModel.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools, logging
-from enum import Enum
-from typing import Dict, List
-from common.orm.Database import Database
-from common.orm.backend.Tools import key_to_str
-from common.orm.fields.EnumeratedField import EnumeratedField
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum
-from .ConfigModel import ConfigModel
-from .Tools import grpc_to_enum
-
-LOGGER = logging.getLogger(__name__)
-
-class ORM_DeviceDriverEnum(Enum):
-    UNDEFINED             = DeviceDriverEnum.DEVICEDRIVER_UNDEFINED
-    OPENCONFIG            = DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG
-    TRANSPORT_API         = DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API
-    P4                    = DeviceDriverEnum.DEVICEDRIVER_P4
-    IETF_NETWORK_TOPOLOGY = DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY
-    ONF_TR_352            = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352
-    XR                    = DeviceDriverEnum.DEVICEDRIVER_XR
-
-grpc_to_enum__device_driver = functools.partial(
-    grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum)
-
-class ORM_DeviceOperationalStatusEnum(Enum):
-    UNDEFINED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_UNDEFINED
-    DISABLED  = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
-    ENABLED   = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
-
-grpc_to_enum__device_operational_status = functools.partial(
-    grpc_to_enum, DeviceOperationalStatusEnum, ORM_DeviceOperationalStatusEnum)
-
-class DeviceModel(Model):
-    pk = PrimaryKeyField()
-    device_uuid = StringField(required=True, allow_empty=False)
-    device_type = StringField()
-    device_initial_config_fk = ForeignKeyField(ConfigModel)
-    device_running_config_fk = ForeignKeyField(ConfigModel)
-    device_operational_status = EnumeratedField(ORM_DeviceOperationalStatusEnum, required=True)
-
-    def dump_id(self) -> Dict:
-        return {'device_uuid': {'uuid': self.device_uuid}}
-
-    def dump_initial_config(self) -> Dict:
-        return ConfigModel(self.database, self.device_initial_config_fk).dump()
-
-    def dump_running_config(self) -> Dict:
-        return ConfigModel(self.database, self.device_running_config_fk).dump()
-
-    def dump_drivers(self) -> List[int]:
-        db_driver_pks = self.references(DriverModel)
-        return [DriverModel(self.database, pk).dump() for pk,_ in db_driver_pks]
-
-    def dump_endpoints(self) -> List[Dict]:
-        from .EndPointModel import EndPointModel # pylint: disable=import-outside-toplevel
-        db_endpoints_pks = self.references(EndPointModel)
-        return [EndPointModel(self.database, pk).dump() for pk,_ in db_endpoints_pks]
-
-    def dump(   # pylint: disable=arguments-differ
-            self, include_config_rules=True, include_drivers=True, include_endpoints=True
-        ) -> Dict:
-        result = {
-            'device_id': self.dump_id(),
-            'device_type': self.device_type,
-            'device_operational_status': self.device_operational_status.value,
-        }
-        if include_config_rules: result.setdefault('device_config', {})['config_rules'] = self.dump_running_config()
-        if include_drivers: result['device_drivers'] = self.dump_drivers()
-        if include_endpoints: result['device_endpoints'] = self.dump_endpoints()
-        return result
-
-class DriverModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    device_fk = ForeignKeyField(DeviceModel)
-    driver = EnumeratedField(ORM_DeviceDriverEnum, required=True)
-
-    def dump(self) -> Dict:
-        return self.driver.value
-
-def set_drivers(database : Database, db_device : DeviceModel, grpc_device_drivers):
-    db_device_pk = db_device.pk
-    for driver in grpc_device_drivers:
-        orm_driver = grpc_to_enum__device_driver(driver)
-        str_device_driver_key = key_to_str([db_device_pk, orm_driver.name])
-        db_device_driver = DriverModel(database, str_device_driver_key)
-        db_device_driver.device_fk = db_device
-        db_device_driver.driver = orm_driver
-        db_device_driver.save()
diff --git a/src/device/service/database/EndPointModel.py b/src/device/service/database/EndPointModel.py
deleted file mode 100644
index 3d4435737349809c527c80546ed412e621afcbdd..0000000000000000000000000000000000000000
--- a/src/device/service/database/EndPointModel.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import Dict, List
-from common.orm.Database import Database
-from common.orm.HighLevel import update_or_create_object
-from common.orm.backend.Tools import key_to_str
-from common.orm.fields.EnumeratedField import EnumeratedField
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from .DeviceModel import DeviceModel
-from .KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type
-from .TopologyModel import TopologyModel
-
-LOGGER = logging.getLogger(__name__)
-
-class EndPointModel(Model):
-    pk = PrimaryKeyField()
-    topology_fk = ForeignKeyField(TopologyModel, required=False)
-    device_fk = ForeignKeyField(DeviceModel)
-    endpoint_uuid = StringField(required=True, allow_empty=False)
-    endpoint_type = StringField()
-
-    def dump_id(self) -> Dict:
-        device_id = DeviceModel(self.database, self.device_fk).dump_id()
-        result = {
-            'device_id': device_id,
-            'endpoint_uuid': {'uuid': self.endpoint_uuid},
-        }
-        if self.topology_fk is not None:
-            result['topology_id'] = TopologyModel(self.database, self.topology_fk).dump_id()
-        return result
-
-    def dump_kpi_sample_types(self) -> List[int]:
-        db_kpi_sample_type_pks = self.references(EndPointMonitorModel)
-        return [EndPointMonitorModel(self.database, pk).dump() for pk,_ in db_kpi_sample_type_pks]
-
-    def dump(   # pylint: disable=arguments-differ
-            self, include_kpi_sample_types=True
-        ) -> Dict:
-        result = {
-            'endpoint_id': self.dump_id(),
-            'endpoint_type': self.endpoint_type,
-        }
-        if include_kpi_sample_types: result['kpi_sample_types'] = self.dump_kpi_sample_types()
-        return result
-
-class EndPointMonitorModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    endpoint_fk = ForeignKeyField(EndPointModel)
-    resource_key = StringField(required=True, allow_empty=True)
-    kpi_sample_type = EnumeratedField(ORM_KpiSampleTypeEnum, required=True)
-
-    def dump(self) -> Dict:
-        return self.kpi_sample_type.value
-
-def set_endpoint_monitors(database : Database, db_endpoint : EndPointModel, grpc_endpoint_kpi_sample_types):
-    db_endpoint_pk = db_endpoint.pk
-    for kpi_sample_type in grpc_endpoint_kpi_sample_types:
-        orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type)
-        str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, str(orm_kpi_sample_type.value)])
-        update_or_create_object(database, EndPointMonitorModel, str_endpoint_kpi_sample_type_key, {
-            'endpoint_fk'    : db_endpoint,
-            'kpi_sample_type': orm_kpi_sample_type,
-        })
diff --git a/src/device/service/database/KpiModel.py b/src/device/service/database/KpiModel.py
deleted file mode 100644
index e3631d38099c02cd459af7f8393b6991c476bd92..0000000000000000000000000000000000000000
--- a/src/device/service/database/KpiModel.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import Dict
-from common.orm.fields.EnumeratedField import EnumeratedField
-from common.orm.fields.FloatField import FloatField
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from .DeviceModel import DeviceModel
-from .EndPointModel import EndPointModel
-from .KpiSampleType import ORM_KpiSampleTypeEnum
-
-LOGGER = logging.getLogger(__name__)
-
-class KpiModel(Model):
-    pk = PrimaryKeyField()
-    kpi_uuid = StringField(required=True, allow_empty=False)
-    kpi_description = StringField(required=False, allow_empty=True)
-    kpi_sample_type = EnumeratedField(ORM_KpiSampleTypeEnum, required=True)
-    device_fk = ForeignKeyField(DeviceModel)
-    endpoint_fk = ForeignKeyField(EndPointModel)
-    sampling_duration = FloatField(min_value=0, required=True)
-    sampling_interval = FloatField(min_value=0, required=True)
-
-    def dump_id(self) -> Dict:
-        return {'kpi_id': {'uuid': self.kpi_uuid}}
-
-    def dump_descriptor(self) -> Dict:
-        result = {
-            'kpi_description': self.kpi_description,
-            'kpi_sample_type': self.kpi_sample_type.value,
-        }
-        if self.device_fk is not None:
-            result['device_id'] = DeviceModel(self.database, self.device_fk).dump_id()
-        if self.endpoint_fk is not None:
-            result['endpoint_id'] = EndPointModel(self.database, self.endpoint_fk).dump_id()
-        return result
-
-    def dump(self) -> Dict:
-        return {
-            'kpi_id': self.dump_id(),
-            'kpi_descriptor': self.dump_descriptor(),
-            'sampling_duration_s': self.sampling_duration,
-            'sampling_interval_s': self.sampling_interval,
-        }
diff --git a/src/device/service/database/KpiSampleType.py b/src/device/service/database/KpiSampleType.py
deleted file mode 100644
index 0a2015b3fdeaceeed8b01619805f55f2a9267468..0000000000000000000000000000000000000000
--- a/src/device/service/database/KpiSampleType.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-from enum import Enum
-from common.proto.kpi_sample_types_pb2 import KpiSampleType
-from .Tools import grpc_to_enum
-
-class ORM_KpiSampleTypeEnum(Enum):
-    UNKNOWN             = KpiSampleType.KPISAMPLETYPE_UNKNOWN
-    PACKETS_TRANSMITTED = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED
-    PACKETS_RECEIVED    = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
-    BYTES_TRANSMITTED   = KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED
-    BYTES_RECEIVED      = KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED
-
-grpc_to_enum__kpi_sample_type = functools.partial(
-    grpc_to_enum, KpiSampleType, ORM_KpiSampleTypeEnum)
diff --git a/src/device/service/database/TopologyModel.py b/src/device/service/database/TopologyModel.py
deleted file mode 100644
index f9e9c0b1a26fdf8faca7e1cbe0a64b582bdd4d5d..0000000000000000000000000000000000000000
--- a/src/device/service/database/TopologyModel.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import Dict
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.fields.StringField import StringField
-from common.orm.model.Model import Model
-from .ContextModel import ContextModel
-
-LOGGER = logging.getLogger(__name__)
-
-class TopologyModel(Model):
-    pk = PrimaryKeyField()
-    context_fk = ForeignKeyField(ContextModel)
-    topology_uuid = StringField(required=True, allow_empty=False)
-
-    def dump_id(self) -> Dict:
-        context_id = ContextModel(self.database, self.context_fk).dump_id()
-        return {
-            'context_id': context_id,
-            'topology_uuid': {'uuid': self.topology_uuid},
-        }
-
-    def dump(self) -> Dict:
-        result = {'topology_id': self.dump_id()}
-        return result
diff --git a/src/device/service/driver_api/DriverInstanceCache.py b/src/device/service/driver_api/DriverInstanceCache.py
index 41cc66363885e28082aa353ec46950fbf6ce10e0..29fecf36ff00031de393b30b3d9f6eef3b0c5343 100644
--- a/src/device/service/driver_api/DriverInstanceCache.py
+++ b/src/device/service/driver_api/DriverInstanceCache.py
@@ -12,12 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, threading
+import json, logging, threading
 from typing import Any, Dict, Optional
+from common.method_wrappers.ServiceExceptions import InvalidArgumentException
+from common.proto.context_pb2 import Device, Empty
+from context.client.ContextClient import ContextClient
+from device.service.Tools import get_connect_rules
 from ._Driver import _Driver
 from .DriverFactory import DriverFactory
 from .Exceptions import DriverInstanceCacheTerminatedException
-from .FilterFields import FilterFieldEnum
+from .FilterFields import FilterFieldEnum, get_device_driver_filter_fields
 
 LOGGER = logging.getLogger(__name__)
 
@@ -30,7 +34,8 @@ class DriverInstanceCache:
 
     def get(
         self, device_uuid : str, filter_fields : Dict[FilterFieldEnum, Any] = {}, address : Optional[str] = None,
-        port : Optional[int] = None, settings : Dict[str, Any] = {}) -> _Driver:
+        port : Optional[int] = None, settings : Dict[str, Any] = {}
+    ) -> _Driver:
 
         if self._terminate.is_set():
             raise DriverInstanceCacheTerminatedException()
@@ -61,10 +66,44 @@ class DriverInstanceCache:
         self._terminate.set()
         with self._lock:
             while len(self._device_uuid__to__driver_instance) > 0:
+                device_uuid,device_driver = self._device_uuid__to__driver_instance.popitem()
                 try:
-                    device_uuid,device_driver = self._device_uuid__to__driver_instance.popitem()
                     device_driver.Disconnect()
                 except: # pylint: disable=bare-except
                     msg = 'Error disconnecting Driver({:s}) from device. Will retry later...'
                     LOGGER.exception(msg.format(device_uuid))
+                    # re-adding to retry disconnect
                     self._device_uuid__to__driver_instance[device_uuid] = device_driver
+
+def get_driver(driver_instance_cache : DriverInstanceCache, device : Device) -> _Driver:
+    device_uuid = device.device_id.device_uuid.uuid
+
+    driver : _Driver = driver_instance_cache.get(device_uuid)
+    if driver is not None: return driver
+
+    driver_filter_fields = get_device_driver_filter_fields(device)
+    connect_rules = get_connect_rules(device.device_config)
+
+    #LOGGER.info('[get_driver] connect_rules = {:s}'.format(str(connect_rules)))
+    address  = connect_rules.get('address',  '127.0.0.1')
+    port     = connect_rules.get('port',     '0')
+    settings = connect_rules.get('settings', '{}')
+
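+    # Settings arrive as a JSON-encoded string in the '_connect/settings' config rule; decode before use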
+    try:
+        settings = json.loads(settings)
+    except ValueError as e:
+        raise InvalidArgumentException(
+            'device.device_config.config_rules[settings]', settings,
+            extra_details='_connect/settings Config Rules provided cannot be decoded as a JSON dictionary.'
+        ) from e
+
+    driver : _Driver = driver_instance_cache.get(
+        device_uuid, filter_fields=driver_filter_fields, address=address, port=port, settings=settings)
+    driver.Connect()
+
+    return driver
+
+def preload_drivers(driver_instance_cache : DriverInstanceCache) -> None:
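+    # Instantiate and connect a driver for every device already registered in Context (cache warm-up at start-up)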
+    context_client = ContextClient()
+    devices = context_client.ListDevices(Empty())
+    for device in devices.devices: get_driver(driver_instance_cache, device)
diff --git a/src/device/service/driver_api/FilterFields.py b/src/device/service/driver_api/FilterFields.py
index 9ea5445903958286d68ff3246e0801e0a7955d2a..ba277e5236d141e170e1d3988403f4a28c623860 100644
--- a/src/device/service/driver_api/FilterFields.py
+++ b/src/device/service/driver_api/FilterFields.py
@@ -13,8 +13,9 @@
 # limitations under the License.
 
 from enum import Enum
+from typing import Any, Dict, Optional
 from common.DeviceTypes import DeviceTypeEnum
-from device.service.database.DeviceModel import ORM_DeviceDriverEnum
+from common.proto.context_pb2 import Device, DeviceDriverEnum
 
 class FilterFieldEnum(Enum):
     DEVICE_TYPE   = 'device_type'
@@ -26,8 +27,15 @@ class FilterFieldEnum(Enum):
 # Map each allowed filter field to its set of allowed values; None means free text (no restriction)
 FILTER_FIELD_ALLOWED_VALUES = {
     FilterFieldEnum.DEVICE_TYPE.value   : {i.value for i in DeviceTypeEnum},
-    FilterFieldEnum.DRIVER.value        : {i.value for i in ORM_DeviceDriverEnum},
+    FilterFieldEnum.DRIVER.value        : set(DeviceDriverEnum.values()),
     FilterFieldEnum.VENDOR.value        : None,
     FilterFieldEnum.MODEL.value         : None,
     FilterFieldEnum.SERIAL_NUMBER.value : None,
 }
+
+def get_device_driver_filter_fields(device : Optional[Device]) -> Dict[FilterFieldEnum, Any]:
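+    # Derive driver-selection filter fields (device type and declared drivers) from the gRPC Device object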
+    if device is None: return {}
+    return {
+        FilterFieldEnum.DEVICE_TYPE: device.device_type,
+        FilterFieldEnum.DRIVER     : list(device.device_drivers),
+    }
diff --git a/src/device/service/driver_api/Tools.py b/src/device/service/driver_api/Tools.py
deleted file mode 100644
index 19c81d89bfe7e7e1bd46edb205eaf1f2b4bee778..0000000000000000000000000000000000000000
--- a/src/device/service/driver_api/Tools.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import operator
-from typing import Any, Callable, List, Tuple, Union
-
-ACTION_MSG_GET         = 'Get resource(key={:s})'
-ACTION_MSG_SET         = 'Set resource(key={:s}, value={:s})'
-ACTION_MSG_DELETE      = 'Delete resource(key={:s}, value={:s})'
-ACTION_MSG_SUBSCRIBE   = 'Subscribe subscription(key={:s}, duration={:s}, interval={:s})'
-ACTION_MSG_UNSUBSCRIBE = 'Unsubscribe subscription(key={:s}, duration={:s}, interval={:s})'
-
-def _get(resource_key : str):
-    return ACTION_MSG_GET.format(str(resource_key))
-
-def _set(resource : Tuple[str, Any]):
-    return ACTION_MSG_SET.format(*tuple(map(str, resource)))
-
-def _delete(resource : Tuple[str, Any]):
-    return ACTION_MSG_SET.format(*tuple(map(str, resource)))
-
-def _subscribe(subscription : Tuple[str, float, float]):
-    return ACTION_MSG_SUBSCRIBE.format(*tuple(map(str, subscription)))
-
-def _unsubscribe(subscription : Tuple[str, float, float]):
-    return ACTION_MSG_UNSUBSCRIBE.format(*tuple(map(str, subscription)))
-
-def _check_errors(
-    error_func : Callable, parameters_list : List[Any], results_list : List[Union[bool, Exception]]
-    ) -> List[str]:
-    errors = []
-    for parameters, results in zip(parameters_list, results_list):
-        if not isinstance(results, Exception): continue
-        errors.append('Unable to {:s}; error({:s})'.format(error_func(parameters), str(results)))
-    return errors
-
-def check_get_errors(
-    resource_keys : List[str], results : List[Tuple[str, Union[Any, None, Exception]]]
-    ) -> List[str]:
-    return _check_errors(_get, resource_keys, map(operator.itemgetter(1), results))
-
-def check_set_errors(
-    resources : List[Tuple[str, Any]], results : List[Union[bool, Exception]]
-    ) -> List[str]:
-    return _check_errors(_set, resources, results)
-
-def check_delete_errors(
-    resources : List[Tuple[str, Any]], results : List[Union[bool, Exception]]
-    ) -> List[str]:
-    return _check_errors(_delete, resources, results)
-
-def check_subscribe_errors(
-    subscriptions : List[Tuple[str, float, float]], results : List[Union[bool, Exception]]
-    ) -> List[str]:
-    return _check_errors(_subscribe, subscriptions, results)
-
-def check_unsubscribe_errors(
-    subscriptions : List[Tuple[str, float, float]], results : List[Union[bool, Exception]]
-    ) -> List[str]:
-    return _check_errors(_unsubscribe, subscriptions, results)
diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py
index 4e4a9ac11363958fb4609976ce8609745bb97c01..bde5c93a5f40872d3dc67f5ebf86564fd644db87 100644
--- a/src/device/service/drivers/__init__.py
+++ b/src/device/service/drivers/__init__.py
@@ -14,7 +14,8 @@
 
 import os
 from common.DeviceTypes import DeviceTypeEnum
-from ..driver_api.FilterFields import FilterFieldEnum, ORM_DeviceDriverEnum
+from common.proto.context_pb2 import DeviceDriverEnum
+from ..driver_api.FilterFields import FilterFieldEnum
 
 TRUE_VALUES = {'T', 'TRUE', 'YES', '1'}
 DEVICE_EMULATED_ONLY = os.environ.get('DEVICE_EMULATED_ONLY')
@@ -47,7 +48,7 @@ DRIVERS.append(
                 #DeviceTypeEnum.PACKET_SWITCH,
             ],
             FilterFieldEnum.DRIVER: [
-                ORM_DeviceDriverEnum.UNDEFINED,
+                DeviceDriverEnum.DEVICEDRIVER_UNDEFINED,
             ],
         },
         #{
@@ -63,12 +64,12 @@ DRIVERS.append(
         #        DeviceTypeEnum.EMULATED_PACKET_SWITCH,
         #    ],
         #    FilterFieldEnum.DRIVER: [
-        #        ORM_DeviceDriverEnum.UNDEFINED,
-        #        ORM_DeviceDriverEnum.OPENCONFIG,
-        #        ORM_DeviceDriverEnum.TRANSPORT_API,
-        #        ORM_DeviceDriverEnum.P4,
-        #        ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY,
-        #        ORM_DeviceDriverEnum.ONF_TR_352,
+        #        DeviceDriverEnum.DEVICEDRIVER_UNDEFINED,
+        #        DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG,
+        #        DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API,
+        #        DeviceDriverEnum.DEVICEDRIVER_P4,
+        #        DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY,
+        #        DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352,
         #    ],
         #}
     ]))
@@ -80,7 +81,7 @@ if LOAD_ALL_DEVICE_DRIVERS:
             {
                 # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver
                 FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER,
-                FilterFieldEnum.DRIVER     : ORM_DeviceDriverEnum.OPENCONFIG,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG,
             }
         ]))
 
@@ -91,7 +92,7 @@ if LOAD_ALL_DEVICE_DRIVERS:
             {
                 # Real OLS, specifying TAPI Driver => use TransportApiDriver
                 FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPEN_LINE_SYSTEM,
-                FilterFieldEnum.DRIVER     : ORM_DeviceDriverEnum.TRANSPORT_API,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API,
             }
         ]))
 
@@ -102,7 +103,7 @@ if LOAD_ALL_DEVICE_DRIVERS:
             {
                 # Real P4 Switch, specifying P4 Driver => use P4Driver
                 FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH,
-                FilterFieldEnum.DRIVER     : ORM_DeviceDriverEnum.P4,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_P4,
             }
         ]))
 
@@ -112,7 +113,7 @@ if LOAD_ALL_DEVICE_DRIVERS:
         (IETFApiDriver, [
             {
                 FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM,
-                FilterFieldEnum.DRIVER     : ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY,
             }
         ]))
 
@@ -123,6 +124,6 @@ if LOAD_ALL_DEVICE_DRIVERS:
             {
                 # Close enough, it does optical switching
                 FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.XR_CONSTELLATION,
-                FilterFieldEnum.DRIVER     : ORM_DeviceDriverEnum.XR,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_XR,
             }
         ]))
diff --git a/src/device/service/database/RelationModels.py b/src/device/service/drivers/emulated/Constants.py
similarity index 55%
rename from src/device/service/database/RelationModels.py
rename to src/device/service/drivers/emulated/Constants.py
index 0f6caa646f7548fe0d4aa23829183a132069c589..1c148c02b2a802b75c133f0b0c4ea20438190044 100644
--- a/src/device/service/database/RelationModels.py
+++ b/src/device/service/drivers/emulated/Constants.py
@@ -12,16 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-from common.orm.fields.ForeignKeyField import ForeignKeyField
-from common.orm.fields.PrimaryKeyField import PrimaryKeyField
-from common.orm.model.Model import Model
-from .EndPointModel import EndPointMonitorModel
-from .KpiModel import KpiModel
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES
 
-LOGGER = logging.getLogger(__name__)
-
-class EndPointMonitorKpiModel(Model): # pylint: disable=abstract-method
-    pk = PrimaryKeyField()
-    endpoint_monitor_fk = ForeignKeyField(EndPointMonitorModel)
-    kpi_fk = ForeignKeyField(KpiModel)
+SPECIAL_RESOURCE_MAPPINGS = {
+    RESOURCE_ENDPOINTS        : '/endpoints',
+    RESOURCE_INTERFACES       : '/interfaces',
+    RESOURCE_NETWORK_INSTANCES: '/net-instances',
+}
diff --git a/src/device/service/drivers/emulated/EmulatedDriver.py b/src/device/service/drivers/emulated/EmulatedDriver.py
index 6029ff6604b2525b4509a24a2ec0d6f7c38513d0..4f5effce0a8b6156ce99a73b49b71f157d891286 100644
--- a/src/device/service/drivers/emulated/EmulatedDriver.py
+++ b/src/device/service/drivers/emulated/EmulatedDriver.py
@@ -12,117 +12,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import anytree, json, logging, math, pytz, queue, random, re, threading
+import anytree, json, logging, pytz, queue, re, threading
 from datetime import datetime, timedelta
-from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
+from typing import Any, Iterator, List, Optional, Tuple, Union
 from apscheduler.executors.pool import ThreadPoolExecutor
 from apscheduler.job import Job
 from apscheduler.jobstores.memory import MemoryJobStore
 from apscheduler.schedulers.background import BackgroundScheduler
 from common.method_wrappers.Decorator import MetricTypeEnum, MetricsPool, metered_subclass_method, INF
 from common.type_checkers.Checkers import chk_float, chk_length, chk_string, chk_type
-from device.service.database.KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type
-from device.service.driver_api._Driver import (
-    RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES,
-    _Driver)
+from device.service.driver_api._Driver import _Driver
 from device.service.driver_api.AnyTreeTools import TreeNode, dump_subtree, get_subnode, set_subnode_value
+from .Constants import SPECIAL_RESOURCE_MAPPINGS
+from .SyntheticSamplingParameters import SyntheticSamplingParameters, do_sampling
+from .Tools import compose_resource_endpoint
 
 LOGGER = logging.getLogger(__name__)
 
-SPECIAL_RESOURCE_MAPPINGS = {
-    RESOURCE_ENDPOINTS        : '/endpoints',
-    RESOURCE_INTERFACES       : '/interfaces',
-    RESOURCE_NETWORK_INSTANCES: '/net-instances',
-}
-
-def compose_resource_endpoint(endpoint_data : Dict[str, Any]) -> Tuple[str, Any]:
-    endpoint_uuid = endpoint_data.get('uuid')
-    if endpoint_uuid is None: return None
-    endpoint_resource_path = SPECIAL_RESOURCE_MAPPINGS.get(RESOURCE_ENDPOINTS)
-    endpoint_resource_key = '{:s}/endpoint[{:s}]'.format(endpoint_resource_path, endpoint_uuid)
-
-    endpoint_type = endpoint_data.get('type')
-    if endpoint_type is None: return None
-
-    endpoint_sample_types = endpoint_data.get('sample_types')
-    if endpoint_sample_types is None: return None
-    sample_types = {}
-    for endpoint_sample_type in endpoint_sample_types:
-        try:
-            kpi_sample_type : ORM_KpiSampleTypeEnum = grpc_to_enum__kpi_sample_type(endpoint_sample_type)
-        except: # pylint: disable=bare-except
-            LOGGER.warning('Unknown EndpointSampleType({:s}) for Endpoint({:s}). Ignoring and continuing...'.format(
-                str(endpoint_sample_type), str(endpoint_data)))
-            continue
-        metric_name = kpi_sample_type.name.lower()
-        monitoring_resource_key = '{:s}/state/{:s}'.format(endpoint_resource_key, metric_name)
-        sample_types[endpoint_sample_type] = monitoring_resource_key
-
-    endpoint_resource_value = {'uuid': endpoint_uuid, 'type': endpoint_type, 'sample_types': sample_types}
-    return endpoint_resource_key, endpoint_resource_value
-
-RE_GET_ENDPOINT_METRIC = re.compile(r'.*\/endpoint\[([^\]]+)\]\/state\/(.*)')
 RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r'.*\/interface\[([^\]]+)\].*')
 
-class SyntheticSamplingParameters:
-    def __init__(self) -> None:
-        self.__lock = threading.Lock()
-        self.__data = {}
-        self.__configured_endpoints = set()
-
-    def set_endpoint_configured(self, endpoint_uuid : str):
-        with self.__lock:
-            self.__configured_endpoints.add(endpoint_uuid)
-
-    def unset_endpoint_configured(self, endpoint_uuid : str):
-        with self.__lock:
-            self.__configured_endpoints.discard(endpoint_uuid)
-
-    def get(self, resource_key : str) -> Tuple[float, float, float, float]:
-        with self.__lock:
-            match = RE_GET_ENDPOINT_METRIC.match(resource_key)
-            if match is None:
-                msg = '[SyntheticSamplingParameters:get] unable to extract endpoint-metric from resource_key "{:s}"'
-                LOGGER.error(msg.format(resource_key))
-                return (0, 0, 1, 0, 0)
-            endpoint_uuid = match.group(1)
-
-            # If endpoint is not configured, generate a flat synthetic traffic aligned at 0
-            if endpoint_uuid not in self.__configured_endpoints: return (0, 0, 1, 0, 0)
-
-            metric = match.group(2)
-            metric_sense = metric.lower().replace('packets_', '').replace('bytes_', '')
-
-            msg = '[SyntheticSamplingParameters:get] resource_key={:s}, endpoint_uuid={:s}, metric={:s}, metric_sense={:s}'
-            LOGGER.info(msg.format(resource_key, endpoint_uuid, metric, metric_sense))
-
-            parameters_key = '{:s}-{:s}'.format(endpoint_uuid, metric_sense)
-            parameters = self.__data.get(parameters_key)
-            if parameters is not None: return parameters
-
-            # assume packets
-            amplitude  = 1.e7 * random.random()
-            phase      = 60 * random.random()
-            period     = 3600 * random.random()
-            offset     = 1.e8 * random.random() + amplitude
-            avg_bytes_per_packet = random.randint(500, 1500)
-            parameters = (amplitude, phase, period, offset, avg_bytes_per_packet)
-            return self.__data.setdefault(parameters_key, parameters)
-
-def do_sampling(
-        synthetic_sampling_parameters : SyntheticSamplingParameters, resource_key : str, out_samples : queue.Queue
-    ):
-    amplitude, phase, period, offset, avg_bytes_per_packet = synthetic_sampling_parameters.get(resource_key)
-    if 'bytes' in resource_key.lower():
-        # convert to bytes
-        amplitude = avg_bytes_per_packet * amplitude
-        offset = avg_bytes_per_packet * offset
-    timestamp = datetime.timestamp(datetime.utcnow())
-    waveform  = amplitude * math.sin(2 * math.pi * timestamp / period + phase) + offset
-    noise     = amplitude * random.random()
-    value     = abs(0.95 * waveform + 0.05 * noise)
-    out_samples.put_nowait((timestamp, resource_key, value))
-
 HISTOGRAM_BUCKETS = (
     # .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, INF
     0.0001, 0.00025, 0.00050, 0.00075,
@@ -240,7 +148,7 @@ class EmulatedDriver(_Driver):
 
                 try:
                     resource_value = json.loads(resource_value)
-                except: # pylint: disable=broad-except
+                except: # pylint: disable=bare-except
                     pass
 
                 set_subnode_value(resolver, self.__running, resource_path, resource_value)
diff --git a/src/device/service/drivers/emulated/SyntheticSamplingParameters.py b/src/device/service/drivers/emulated/SyntheticSamplingParameters.py
new file mode 100644
index 0000000000000000000000000000000000000000..65feb9d16e72dd55f8f7ffdf5a2e1bee11f94c81
--- /dev/null
+++ b/src/device/service/drivers/emulated/SyntheticSamplingParameters.py
@@ -0,0 +1,86 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, math, queue, random, re, threading
+from datetime import datetime
+from typing import Optional, Tuple
+
+LOGGER = logging.getLogger(__name__)
+
+RE_GET_ENDPOINT_METRIC = re.compile(r'.*\/endpoint\[([^\]]+)\]\/state\/(.*)')
+
+MSG_ERROR_PARSE = '[get] unable to extract endpoint-metric from monitoring_resource_key "{:s}"'
+MSG_INFO = '[get] monitoring_resource_key={:s}, endpoint_uuid={:s}, metric={:s}, metric_sense={:s}'
+
+class SyntheticSamplingParameters:
+    def __init__(self) -> None:
+        self.__lock = threading.Lock()
+        self.__data = {}
+        self.__configured_endpoints = set()
+
+    def set_endpoint_configured(self, endpoint_uuid : str):
+        with self.__lock:
+            self.__configured_endpoints.add(endpoint_uuid)
+
+    def unset_endpoint_configured(self, endpoint_uuid : str):
+        with self.__lock:
+            self.__configured_endpoints.discard(endpoint_uuid)
+
+    def get(self, monitoring_resource_key : str) -> Optional[Tuple[float, float, float, float, float]]:
+        with self.__lock:
+            match = RE_GET_ENDPOINT_METRIC.match(monitoring_resource_key)
+            if match is None:
+                LOGGER.error(MSG_ERROR_PARSE.format(monitoring_resource_key))
+                return None
+            endpoint_uuid = match.group(1)
+
+            # If endpoint is not configured, generate a flat synthetic traffic aligned at 0
+            if endpoint_uuid not in self.__configured_endpoints: return (0, 0, 1, 0, 0)
+
+            metric = match.group(2)
+            metric_sense = metric.lower().replace('packets_', '').replace('bytes_', '')
+
+            LOGGER.info(MSG_INFO.format(monitoring_resource_key, endpoint_uuid, metric, metric_sense))
+
+            parameters_key = '{:s}-{:s}'.format(endpoint_uuid, metric_sense)
+            parameters = self.__data.get(parameters_key)
+            if parameters is not None: return parameters
+
+            # assume packets
+            amplitude  = 1.e7 * random.random()
+            phase      = 60 * random.random()
+            period     = 3600 * random.random()
+            offset     = 1.e8 * random.random() + amplitude
+            avg_bytes_per_packet = random.randint(500, 1500)
+            parameters = (amplitude, phase, period, offset, avg_bytes_per_packet)
+            return self.__data.setdefault(parameters_key, parameters)
+
+def do_sampling(
+    synthetic_sampling_parameters : SyntheticSamplingParameters, monitoring_resource_key : str,
+    out_samples : queue.Queue
+) -> None:
+    parameters = synthetic_sampling_parameters.get(monitoring_resource_key)
+    if parameters is None: return
+    amplitude, phase, period, offset, avg_bytes_per_packet = parameters
+
+    if 'bytes' in monitoring_resource_key.lower():
+        # convert to bytes
+        amplitude = avg_bytes_per_packet * amplitude
+        offset = avg_bytes_per_packet * offset
+
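+    # Synthesize a noisy sinusoid: 95% deterministic waveform plus 5% random noise, kept non-negative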
+    timestamp = datetime.timestamp(datetime.utcnow())
+    waveform  = amplitude * math.sin(2 * math.pi * timestamp / period + phase) + offset
+    noise     = amplitude * random.random()
+    value     = abs(0.95 * waveform + 0.05 * noise)
+    out_samples.put_nowait((timestamp, monitoring_resource_key, value))
diff --git a/src/device/service/drivers/emulated/Tools.py b/src/device/service/drivers/emulated/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..14672c203fd86da46c2ac5ddda39860ab67e68db
--- /dev/null
+++ b/src/device/service/drivers/emulated/Tools.py
@@ -0,0 +1,46 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Any, Dict, Optional, Tuple
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS
+from .Constants import SPECIAL_RESOURCE_MAPPINGS
+
+LOGGER = logging.getLogger(__name__)
+
+def compose_resource_endpoint(endpoint_data : Dict[str, Any]) -> Optional[Tuple[str, Any]]:
+    endpoint_uuid = endpoint_data.get('uuid')
+    if endpoint_uuid is None: return None
+    endpoint_resource_path = SPECIAL_RESOURCE_MAPPINGS.get(RESOURCE_ENDPOINTS)
+    endpoint_resource_key = '{:s}/endpoint[{:s}]'.format(endpoint_resource_path, endpoint_uuid)
+
+    endpoint_type = endpoint_data.get('type')
+    if endpoint_type is None: return None
+
+    endpoint_sample_types = endpoint_data.get('sample_types')
+    if endpoint_sample_types is None: return None
+
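+    # Map each KPI sample type to the monitoring resource key used to poll it,
+    # e.g. '/endpoints/endpoint[uuid]/state/bytes_received'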
+    sample_types = {}
+    for endpoint_sample_type in endpoint_sample_types:
+        try:
+            metric_name = KpiSampleType.Name(endpoint_sample_type).lower().replace('kpisampletype_', '')
+        except: # pylint: disable=bare-except
+            LOGGER.warning('Unsupported EndPointSampleType({:s})'.format(str(endpoint_sample_type)))
+            continue
+        monitoring_resource_key = '{:s}/state/{:s}'.format(endpoint_resource_key, metric_name)
+        sample_types[endpoint_sample_type] = monitoring_resource_key
+
+    endpoint_resource_value = {'uuid': endpoint_uuid, 'type': endpoint_type, 'sample_types': sample_types}
+    return endpoint_resource_key, endpoint_resource_value
diff --git a/src/device/service/drivers/openconfig/templates/EndPoints.py b/src/device/service/drivers/openconfig/templates/EndPoints.py
index 9bd2e75ac4da0965c91b9154046694fd352dc4f6..e831d7738b3a09ae99773e1b882650554cfe5d78 100644
--- a/src/device/service/drivers/openconfig/templates/EndPoints.py
+++ b/src/device/service/drivers/openconfig/templates/EndPoints.py
@@ -14,7 +14,7 @@
 
 import logging, lxml.etree as ET
 from typing import Any, Dict, List, Tuple
-from device.service.database.KpiSampleType import ORM_KpiSampleTypeEnum
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from .Namespace import NAMESPACES
 from .Tools import add_value_from_collection, add_value_from_tag
 
@@ -47,10 +47,10 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
         if 'type' not in endpoint: endpoint['type'] = '-'
 
         sample_types = {
-            ORM_KpiSampleTypeEnum.BYTES_RECEIVED.value     : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'in-octets' ),
-            ORM_KpiSampleTypeEnum.BYTES_TRANSMITTED.value  : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'out-octets'),
-            ORM_KpiSampleTypeEnum.PACKETS_RECEIVED.value   : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'in-pkts'   ),
-            ORM_KpiSampleTypeEnum.PACKETS_TRANSMITTED.value: XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'out-pkts'  ),
+            KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED     : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'in-octets' ),
+            KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED  : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'out-octets'),
+            KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED   : XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'in-pkts'   ),
+            KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED: XPATH_IFACE_COUNTER.format(endpoint['uuid'], 'out-pkts'  ),
         }
         add_value_from_collection(endpoint, 'sample_types', sample_types)
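Since the OpenConfig parser now keys sample_types by the proto enum, metric names elsewhere (e.g., in the emulated driver's Tools.py above) are derived from the same enum names. A small sketch, assuming the generated kpi_sample_types_pb2 module is importable:

```python
from common.proto.kpi_sample_types_pb2 import KpiSampleType

# Derive a readable metric name from the proto enum, as the emulated driver does:
name = KpiSampleType.Name(KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED)
metric_name = name.lower().replace('kpisampletype_', '')
print(metric_name)  # -> 'bytes_received'
```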
 
diff --git a/src/device/service/monitoring/MonitoringLoop.py b/src/device/service/monitoring/MonitoringLoop.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec17a3ef6763f52831284b8af5bcbed534755525
--- /dev/null
+++ b/src/device/service/monitoring/MonitoringLoop.py
@@ -0,0 +1,43 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import queue, threading
+from device.service.driver_api._Driver import _Driver
+
+class MonitoringLoop:
+    def __init__(self, device_uuid : str, driver : _Driver, samples_queue : queue.Queue) -> None:
+        self._device_uuid = device_uuid
+        self._driver = driver
+        self._samples_queue = samples_queue
+        self._running = threading.Event()
+        self._terminate = threading.Event()
+        self._samples_stream = self._driver.GetState(blocking=True, terminate=self._terminate)
+        self._collector_thread = threading.Thread(target=self._collect, daemon=True)
+
+    def _collect(self) -> None:
+        for sample in self._samples_stream:
+            if self._terminate.is_set(): break
+            sample = (self._device_uuid, *sample)
+            self._samples_queue.put_nowait(sample)
+
+    def start(self):
+        self._collector_thread.start()
+        self._running.set()
+
+    @property
+    def is_running(self): return self._running.is_set()
+
+    def stop(self):
+        self._terminate.set()
+        self._collector_thread.join()
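A minimal usage sketch of MonitoringLoop with a duck-typed stub driver; it assumes GetState() yields (timestamp, resource_key, value) tuples, as the emulated driver does:

```python
import queue, time
from device.service.monitoring.MonitoringLoop import MonitoringLoop

class StubDriver:
    # Stand-in for a _Driver: yields two synthetic samples and finishes.
    def GetState(self, blocking=True, terminate=None):
        for i in range(2):
            if terminate is not None and terminate.is_set(): break
            yield (time.time(), 'endpoint[EP1]/state/bytes_received', float(i))

samples = queue.Queue()
loop = MonitoringLoop('R1-EMU', StubDriver(), samples)
loop.start()
print(samples.get())  # -> ('R1-EMU', <timestamp>, 'endpoint[EP1]/state/bytes_received', 0.0)
loop.stop()
```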
diff --git a/src/device/service/monitoring/MonitoringLoops.py b/src/device/service/monitoring/MonitoringLoops.py
new file mode 100644
index 0000000000000000000000000000000000000000..5763951fb2075e1975688eda0e49d24e10b0f697
--- /dev/null
+++ b/src/device/service/monitoring/MonitoringLoops.py
@@ -0,0 +1,170 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, queue, threading
+from typing import Dict, Optional, Tuple, Union
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.proto.monitoring_pb2 import Kpi
+from monitoring.client.MonitoringClient import MonitoringClient
+from ..driver_api._Driver import _Driver
+from .MonitoringLoop import MonitoringLoop
+
+LOGGER = logging.getLogger(__name__)
+
+QUEUE_GET_WAIT_TIMEOUT = 0.5
+
+def value_to_grpc(value : Union[bool, float, int, str]) -> Dict:
+    # Note: check bool before int; bool is a subclass of int in Python, so an
+    # isinstance(value, int) test placed first would also capture booleans.
+    if isinstance(value, bool):
+        kpi_value_field_name = 'boolVal'
+        kpi_value_field_cast = bool
+    elif isinstance(value, int):
+        kpi_value_field_name = 'int64Val'
+        kpi_value_field_cast = int
+    elif isinstance(value, float):
+        kpi_value_field_name = 'floatVal'
+        kpi_value_field_cast = float
+    else:
+        kpi_value_field_name = 'stringVal'
+        kpi_value_field_cast = str
+
+    return {kpi_value_field_name: kpi_value_field_cast(value)}
+
+TYPE_TARGET_KEY = Tuple[str, str]               # (device_uuid, monitoring_resource_key)
+TYPE_TARGET_KPI = Tuple[str, float, float]      # (kpi_uuid, sampling_duration, sampling_interval)
+TYPE_KPI_DETAIL = Tuple[str, str, float, float] # (device_uuid, monitoring_resource_key,
+                                                #  sampling_duration, sampling_interval)
+
+class MonitoringLoops:
+    def __init__(self) -> None:
+        self._monitoring_client = MonitoringClient()
+        self._samples_queue = queue.Queue()
+        self._running = threading.Event()
+        self._terminate = threading.Event()
+
+        self._lock_device_endpoint = threading.Lock()
+        self._device_endpoint_sampletype__to__resource_key : Dict[Tuple[str, str, int], str] = dict()
+
+        self._lock_monitoring_loop = threading.Lock()
+        self._device_uuid__to__monitoring_loop : Dict[str, MonitoringLoop] = dict()
+
+        self._lock_kpis = threading.Lock()
+        self._target_to_kpi : Dict[TYPE_TARGET_KEY, TYPE_TARGET_KPI] = dict()
+        self._kpi_to_detail : Dict[str, TYPE_KPI_DETAIL] = dict()
+
+        self._exporter_thread = threading.Thread(target=self._export, daemon=True)
+
+    def add_device(self, device_uuid : str, driver : _Driver) -> None:
+        with self._lock_monitoring_loop:
+            monitoring_loop = self._device_uuid__to__monitoring_loop.get(device_uuid)
+            if (monitoring_loop is not None) and monitoring_loop.is_running: return
+            monitoring_loop = MonitoringLoop(device_uuid, driver, self._samples_queue)
+            self._device_uuid__to__monitoring_loop[device_uuid] = monitoring_loop
+            monitoring_loop.start()
+
+    def remove_device(self, device_uuid : str) -> None:
+        with self._lock_monitoring_loop:
+            monitoring_loop = self._device_uuid__to__monitoring_loop.get(device_uuid)
+            if monitoring_loop is None: return
+            if monitoring_loop.is_running: monitoring_loop.stop()
+            self._device_uuid__to__monitoring_loop.pop(device_uuid, None)
+
+    def add_resource_key(
+        self, device_uuid : str, endpoint_uuid : str, kpi_sample_type : KpiSampleType, resource_key : str
+    ) -> None:
+        with self._lock_device_endpoint:
+            key = (device_uuid, endpoint_uuid, kpi_sample_type)
+            self._device_endpoint_sampletype__to__resource_key[key] = resource_key
+
+    def get_resource_key(
+        self, device_uuid : str, endpoint_uuid : str, kpi_sample_type : KpiSampleType
+    ) -> Optional[str]:
+        with self._lock_device_endpoint:
+            key = (device_uuid, endpoint_uuid, kpi_sample_type)
+            return self._device_endpoint_sampletype__to__resource_key.get(key)
+
+    def remove_resource_key(
+        self, device_uuid : str, endpoint_uuid : str, kpi_sample_type : KpiSampleType
+    ) -> None:
+        with self._lock_device_endpoint:
+            key = (device_uuid, endpoint_uuid, kpi_sample_type)
+            self._device_endpoint_sampletype__to__resource_key.pop(key, None)
+
+    def add_kpi(
+        self, device_uuid : str, monitoring_resource_key : str, kpi_uuid : str, sampling_duration : float,
+        sampling_interval : float
+    ) -> None:
+        with self._lock_kpis:
+            kpi_key = (device_uuid, monitoring_resource_key)
+            kpi_values = (kpi_uuid, sampling_duration, sampling_interval)
+            self._target_to_kpi[kpi_key] = kpi_values
+
+            kpi_details = (device_uuid, monitoring_resource_key, sampling_duration, sampling_interval)
+            self._kpi_to_detail[kpi_uuid] = kpi_details
+
+    def get_kpi_by_uuid(self, kpi_uuid : str) -> Optional[TYPE_KPI_DETAIL]:
+        with self._lock_kpis:
+            return self._kpi_to_detail.get(kpi_uuid)
+
+    def get_kpi_by_metric(
+        self, device_uuid : str, monitoring_resource_key : str
+    ) -> Optional[TYPE_TARGET_KPI]:
+        with self._lock_kpis:
+            kpi_key = (device_uuid, monitoring_resource_key)
+            return self._target_to_kpi.get(kpi_key)
+
+    def remove_kpi(self, kpi_uuid : str) -> None:
+        with self._lock_kpis:
+            kpi_details = self._kpi_to_detail.pop(kpi_uuid, None)
+            if kpi_details is None: return
+            kpi_key = kpi_details[0:2] # (device_uuid, monitoring_resource_key, _, _)
+            self._target_to_kpi.pop(kpi_key, None)
+
+    def start(self):
+        self._exporter_thread.start()
+
+    @property
+    def is_running(self): return self._running.is_set()
+
+    def stop(self):
+        self._terminate.set()
+        self._exporter_thread.join()
+
+    def _export(self) -> None:
+        self._running.set()
+        while not self._terminate.is_set():
+            try:
+                sample = self._samples_queue.get(block=True, timeout=QUEUE_GET_WAIT_TIMEOUT)
+                #LOGGER.debug('[MonitoringLoops:_export] sample={:s}'.format(str(sample)))
+            except queue.Empty:
+                continue
+
+            device_uuid, timestamp, monitoring_resource_key, value = sample
+
+            kpi_details = self.get_kpi_by_metric(device_uuid, monitoring_resource_key)
+            if kpi_details is None:
+                MSG = 'Kpi for Device({:s})/MonitoringResourceKey({:s}) not found'
+                LOGGER.warning(MSG.format(str(device_uuid), str(monitoring_resource_key)))
+                continue
+            kpi_uuid, _, _ = kpi_details
+
+            try:
+                self._monitoring_client.IncludeKpi(Kpi(**{
+                    'kpi_id'   : {'kpi_id': {'uuid': kpi_uuid}},
+                    'timestamp': {'timestamp': timestamp},
+                    'kpi_value': value_to_grpc(value),
+                }))
+            except: # pylint: disable=bare-except
+                LOGGER.exception('Unable to format/send Kpi')
+
+        self._running.clear()
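As a sanity check on value_to_grpc(), the intended mapping from native Python values onto the KpiValue fields is sketched below; note that bool must be tested before int because bool subclasses int in Python:

```python
from device.service.monitoring.MonitoringLoops import value_to_grpc

assert value_to_grpc(12)   == {'int64Val': 12}
assert value_to_grpc(1.5)  == {'floatVal': 1.5}
assert value_to_grpc(True) == {'boolVal': True}
assert value_to_grpc('up') == {'stringVal': 'up'}
```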
diff --git a/src/context/service/rest_server/__init__.py b/src/device/service/monitoring/__init__.py
similarity index 100%
rename from src/context/service/rest_server/__init__.py
rename to src/device/service/monitoring/__init__.py
diff --git a/src/device/tests/CommonObjects.py b/src/device/tests/CommonObjects.py
index 61f0b44cd5715ac5e631c9e552fc61e7caa524d0..5613d22b2139d2770f2582d24000910b9d3deab3 100644
--- a/src/device/tests/CommonObjects.py
+++ b/src/device/tests/CommonObjects.py
@@ -12,19 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Topology import json_topology, json_topology_id
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 
 # ----- Topology -------------------------------------------------------------------------------------------------------
-TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
+TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
 
 
 # ----- KPI Sample Types -----------------------------------------------------------------------------------------------
diff --git a/src/device/tests/Device_Emulated.py b/src/device/tests/Device_Emulated.py
index 7b8f15918146fcde9e920825e42e2985deeaee24..cf564b0bf380798f329df11e9598126ae9e456e7 100644
--- a/src/device/tests/Device_Emulated.py
+++ b/src/device/tests/Device_Emulated.py
@@ -18,7 +18,7 @@ from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, json_device_id)
 from device.tests.CommonObjects import PACKET_PORT_SAMPLE_TYPES
 
-DEVICE_EMU_UUID     = 'EMULATED'
+DEVICE_EMU_UUID     = 'R1-EMU'
 DEVICE_EMU_ID       = json_device_id(DEVICE_EMU_UUID)
 DEVICE_EMU          = json_device_emulated_packet_router_disabled(DEVICE_EMU_UUID)
 DEVICE_EMU_EP_UUIDS = ['EP1', 'EP2', 'EP3', 'EP4']
diff --git a/src/device/tests/test_unitary_emulated.py b/src/device/tests/test_unitary_emulated.py
index 745c25c1eba679dc67e0ed9e04f38eb0ae8c3af4..8a1b30a6ec01ec004c92be97d27e318e427f4cbe 100644
--- a/src/device/tests/test_unitary_emulated.py
+++ b/src/device/tests/test_unitary_emulated.py
@@ -168,12 +168,14 @@ def test_device_emulated_configure(
         config_rule = (
             ConfigActionEnum.Name(config_rule['action']), config_rule['custom']['resource_key'],
             json.loads(json.dumps(config_rule['custom']['resource_value'])))
+        #LOGGER.info('config_rule: {:s} {:s} = {:s}'.format(*config_rule))
         assert config_rule in config_rules
     for config_rule in DEVICE_EMU_CONFIG_ADDRESSES:
         assert 'custom' in config_rule
         config_rule = (
             ConfigActionEnum.Name(config_rule['action']), config_rule['custom']['resource_key'],
             json.loads(json.dumps(config_rule['custom']['resource_value'])))
+        #LOGGER.info('config_rule: {:s} {:s} = {:s}'.format(*config_rule))
         assert config_rule in config_rules
 
     # Try to reconfigure...
@@ -222,6 +224,7 @@ def test_device_emulated_configure(
         config_rule = (
             ConfigActionEnum.Name(config_rule['action']), config_rule['custom']['resource_key'],
             config_rule['custom']['resource_value'])
+        #LOGGER.info('config_rule: {:s} {:s} = {:s}'.format(*config_rule))
         assert config_rule in config_rules
 
 
diff --git a/src/dlt/.gitlab-ci.yml b/src/dlt/.gitlab-ci.yml
index 3c2013f50904eb9cd366bf3e3b3cfce6d10c6fd6..5d9875ef9e1f5373072e3321397499bc91bb2328 100644
--- a/src/dlt/.gitlab-ci.yml
+++ b/src/dlt/.gitlab-ci.yml
@@ -109,20 +109,32 @@ unit test dlt-connector:
     - build dlt
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
-    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi
     - if docker container ls | grep ${IMAGE_NAME}-connector; then docker rm -f ${IMAGE_NAME}-connector; else echo "${IMAGE_NAME}-connector image is not in the system"; fi
     - if docker container ls | grep ${IMAGE_NAME}-gateway; then docker rm -f ${IMAGE_NAME}-gateway; else echo "${IMAGE_NAME}-gateway image is not in the system"; fi
   script:
     - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG"
     - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG"
-    - docker run --name ${IMAGE_NAME}-gateway -d -p 50051:50051 -v "$PWD/src/${IMAGE_NAME}/gateway/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.1 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG
+    - >
+      docker run --name ${IMAGE_NAME}-gateway -d -p 50051:50051 --network=teraflowbridge
+      --volume "$PWD/src/${IMAGE_NAME}/gateway/tests:/opt/results"
+      $CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG
+    - DLT_GATEWAY_HOST=$(docker inspect ${IMAGE_NAME}-gateway --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $DLT_GATEWAY_HOST
     - sleep 1
-    - docker run --name ${IMAGE_NAME}-connector -d -p 8080:8080 --env "DLT_GATEWAY_HOST=172.28.0.1" --env "DLT_GATEWAY_PORT=50051" -v "$PWD/src/${IMAGE_NAME}/connector/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.2 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG
+    - >
+      docker run --name ${IMAGE_NAME}-connector -d -p 8080:8080 --network=teraflowbridge
+      --volume "$PWD/src/${IMAGE_NAME}/connector/tests:/opt/results"
+      --env "DLT_GATEWAY_HOST=${DLT_GATEWAY_HOST}"
+      --env "DLT_GATEWAY_PORT=50051"
+      $CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG
     - sleep 5
     - docker ps -a
     - docker logs ${IMAGE_NAME}-connector
     - docker logs ${IMAGE_NAME}-gateway
-    - docker exec -i ${IMAGE_NAME}-connector bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/connector/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-connector_report.xml"
+    - >
+      docker exec -i ${IMAGE_NAME}-connector bash -c
+      "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/connector/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-connector_report.xml"
     - docker exec -i ${IMAGE_NAME}-connector bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
diff --git a/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py b/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py
index 8973ae621c1291f8ed6e2673f0c64b59712143ee..c569d75c3d010805bccc9c611e9f33a27a637987 100644
--- a/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py
+++ b/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py
@@ -14,7 +14,7 @@
 
 import grpc, json, logging, threading
 from typing import Any, Dict, Set
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
 from common.proto.context_pb2 import ContextId, Device, EventTypeEnum, Link, Slice, TopologyId
 from common.proto.dlt_connector_pb2 import DltSliceId
 from common.proto.dlt_gateway_pb2 import DltRecordEvent, DltRecordOperationEnum, DltRecordTypeEnum
@@ -35,7 +35,7 @@ LOGGER = logging.getLogger(__name__)
 
 GET_EVENT_TIMEOUT = 0.5
 
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 class Clients:
     def __init__(self) -> None:
@@ -66,9 +66,9 @@ class DltEventDispatcher(threading.Thread):
 
     def run(self) -> None:
         clients = Clients()
-        create_context(clients.context_client, DEFAULT_CONTEXT_UUID)
-        create_topology(clients.context_client, DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID)
-        create_topology(clients.context_client, DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID)
+        create_context(clients.context_client, DEFAULT_CONTEXT_NAME)
+        create_topology(clients.context_client, DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME)
+        create_topology(clients.context_client, DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME)
 
         dlt_events_collector = DltEventsCollector(clients.dlt_gateway_client, log_events_received=True)
         dlt_events_collector.start()
@@ -81,8 +81,8 @@ class DltEventDispatcher(threading.Thread):
             local_domain_uuids = {
                 topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids
             }
-            local_domain_uuids.discard(DEFAULT_TOPOLOGY_UUID)
-            local_domain_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID)
+            local_domain_uuids.discard(DEFAULT_TOPOLOGY_NAME)
+            local_domain_uuids.discard(INTERDOMAIN_TOPOLOGY_NAME)
 
             self.dispatch_event(clients, local_domain_uuids, event)
 
@@ -118,13 +118,13 @@ class DltEventDispatcher(threading.Thread):
             LOGGER.info('[_dispatch_device] record={:s}'.format(grpc_message_to_json_string(record)))
 
             create_context(clients.context_client, domain_uuid)
-            create_topology(clients.context_client, domain_uuid, DEFAULT_TOPOLOGY_UUID)
+            create_topology(clients.context_client, domain_uuid, DEFAULT_TOPOLOGY_NAME)
             device = Device(**json.loads(record.data_json))
             clients.context_client.SetDevice(device)
             device_uuid = device.device_id.device_uuid.uuid # pylint: disable=no-member
-            add_device_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID, device_uuid)
+            add_device_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME, device_uuid)
             domain_context_id = ContextId(**json_context_id(domain_uuid))
-            add_device_to_topology(clients.context_client, domain_context_id, DEFAULT_TOPOLOGY_UUID, device_uuid)
+            add_device_to_topology(clients.context_client, domain_context_id, DEFAULT_TOPOLOGY_NAME, device_uuid)
         elif event_type in {EventTypeEnum.EVENTTYPE_DELETE}:
             raise NotImplementedError('Delete Device')
 
@@ -148,7 +148,7 @@ class DltEventDispatcher(threading.Thread):
             link = Link(**json.loads(record.data_json))
             clients.context_client.SetLink(link)
             link_uuid = link.link_id.link_uuid.uuid # pylint: disable=no-member
-            add_link_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID, link_uuid)
+            add_link_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME, link_uuid)
         elif event_type in {EventTypeEnum.EVENTTYPE_DELETE}:
             raise NotImplementedError('Delete Link')
 
@@ -165,7 +165,7 @@ class DltEventDispatcher(threading.Thread):
         context_uuid = slice_.slice_id.context_id.context_uuid.uuid
         owner_uuid = slice_.slice_owner.owner_uuid.uuid
         create_context(clients.context_client, context_uuid)
-        create_topology(clients.context_client, context_uuid, DEFAULT_TOPOLOGY_UUID)
+        create_topology(clients.context_client, context_uuid, DEFAULT_TOPOLOGY_NAME)
 
         if domain_uuid in local_domain_uuids:
             # it is for "me"
diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py
index c0c3514515a24fbbf6058aa6abc7a1a6ccacf1b5..6844393fe83c6a861f5f8957342e9b9ef0909700 100644
--- a/src/interdomain/service/InterdomainServiceServicerImpl.py
+++ b/src/interdomain/service/InterdomainServiceServicerImpl.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import grpc, logging, uuid
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatusEnum, TeraFlowController, TopologyId
 from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
@@ -95,17 +95,15 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
                 LOGGER.info('[loop] [local] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format(
                     str(domain_uuid), str(is_local_domain), str(slice_uuid)))
 
-                # local slices always in DEFAULT_CONTEXT_UUID
+                # local slices always in DEFAULT_CONTEXT_NAME
                 #context_uuid = request.slice_id.context_id.context_uuid.uuid
-                context_uuid = DEFAULT_CONTEXT_UUID
+                context_uuid = DEFAULT_CONTEXT_NAME
                 endpoint_ids = map_abstract_endpoints_to_real(context_client, domain_uuid, endpoint_ids)
                 sub_slice = compose_slice(
                     context_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints,
                     config_rules=request.slice_config.config_rules)
                 LOGGER.info('[loop] [local] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice)))
                 sub_slice_id = slice_client.CreateSlice(sub_slice)
-                if sub_slice_id != sub_slice.slice_id: # pylint: disable=no-member
-                    raise Exception('Local Slice creation failed. Wrong Slice Id was returned')
             else:
                 slice_uuid = request.slice_id.slice_uuid.uuid
                 LOGGER.info('[loop] [remote] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format(
@@ -113,7 +111,7 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
 
                 # create context/topology for the remote domains where we are creating slices
                 create_context(context_client, domain_uuid)
-                create_topology(context_client, domain_uuid, DEFAULT_TOPOLOGY_UUID)
+                create_topology(context_client, domain_uuid, DEFAULT_TOPOLOGY_NAME)
 
                 sub_slice = compose_slice(
                     domain_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints,
diff --git a/src/interdomain/service/RemoteDomainClients.py b/src/interdomain/service/RemoteDomainClients.py
index 0aaadfeff0aa05ab0d356b00069a6ec86e89926d..6eb2a9c062807d21377e3505ff13e1fb21603942 100644
--- a/src/interdomain/service/RemoteDomainClients.py
+++ b/src/interdomain/service/RemoteDomainClients.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging, socket
-from common.Constants import DEFAULT_CONTEXT_UUID, ServiceNameEnum
+from common.Constants import DEFAULT_CONTEXT_NAME, ServiceNameEnum
 from common.Settings import get_service_host, get_service_port_grpc
 from common.proto.context_pb2 import TeraFlowController
 from interdomain.client.InterdomainClient import InterdomainClient
@@ -25,7 +25,7 @@ class RemoteDomainClients:
         self.peer_domain = {}
 
     def add_peer(
-            self, domain_name : str, host : str, port : int, context_uuid : str = DEFAULT_CONTEXT_UUID
+            self, domain_name : str, host : str, port : int, context_uuid : str = DEFAULT_CONTEXT_NAME
         ) -> None:
         while True:
             try:
@@ -36,7 +36,7 @@ class RemoteDomainClients:
 
         interdomain_client = InterdomainClient(host=host, port=port)
         request = TeraFlowController()
-        request.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID # pylint: disable=no-member
+        request.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME # pylint: disable=no-member
         request.ip_address = get_service_host(ServiceNameEnum.INTERDOMAIN)
         request.port = int(get_service_port_grpc(ServiceNameEnum.INTERDOMAIN))
 
diff --git a/src/interdomain/service/Tools.py b/src/interdomain/service/Tools.py
index fb6371603ea90437437541bb995a59813764d9ef..472132adb996c56673921e30bab342c6c4cd9413 100644
--- a/src/interdomain/service/Tools.py
+++ b/src/interdomain/service/Tools.py
@@ -14,7 +14,7 @@
 
 import json, logging
 from typing import List, Optional, Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
 from common.proto.context_pb2 import (
     ConfigRule, Constraint, ContextId, Device, Empty, EndPointId, Slice, SliceStatusEnum)
 from common.tools.context_queries.CheckType import device_type_is_network, endpoint_type_is_border
@@ -32,12 +32,12 @@ def compute_slice_owner(
 ) -> Optional[str]:
     traversed_domain_uuids = {traversed_domain[0] for traversed_domain in traversed_domains}
 
-    existing_topology_ids = context_client.ListTopologyIds(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)))
+    existing_topology_ids = context_client.ListTopologyIds(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)))
     existing_topology_uuids = {
         topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids
     }
-    existing_topology_uuids.discard(DEFAULT_TOPOLOGY_UUID)
-    existing_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID)
+    existing_topology_uuids.discard(DEFAULT_TOPOLOGY_NAME)
+    existing_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_NAME)
 
     candidate_owner_uuids = traversed_domain_uuids.intersection(existing_topology_uuids)
     if len(candidate_owner_uuids) != 1:
diff --git a/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py
index f3818578186360365e3b828810d942def5722cea..00c0b8d77ca48cc01415452af235332c0475c063 100644
--- a/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py
+++ b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py
@@ -108,16 +108,14 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer):
             slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2'
             slice_endpoint_id.endpoint_uuid.uuid = '2/1'
 
-        local_slice_reply = slice_client.CreateSlice(local_slice_request)
-        if local_slice_reply != local_slice_request.slice_id: # pylint: disable=no-member
-            raise Exception('Local Slice creation failed. Wrong Slice Id was returned')
+        local_slice_id_reply = slice_client.CreateSlice(local_slice_request)
 
         subslice_id = reply.slice_subslice_ids.add()
-        subslice_id.context_id.context_uuid.uuid = local_slice_request.slice_id.context_id.context_uuid.uuid
-        subslice_id.slice_uuid.uuid = local_slice_request.slice_id.slice_uuid.uuid
+        subslice_id.context_id.context_uuid.uuid = local_slice_id_reply.context_id.context_uuid.uuid
+        subslice_id.slice_uuid.uuid = local_slice_id_reply.slice_uuid.uuid
 
-        context_client.SetSlice(reply)
-        return reply.slice_id
+        reply_slice_id = context_client.SetSlice(reply)
+        return reply_slice_id
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def Authenticate(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult:
diff --git a/src/interdomain/service/topology_abstractor/AbstractDevice.py b/src/interdomain/service/topology_abstractor/AbstractDevice.py
index 3448c1036d4ef086d679d5f4308ae95decfbffa7..4bb9683b0a597b06a4bc3a27cf4c90a2455ae995 100644
--- a/src/interdomain/service/topology_abstractor/AbstractDevice.py
+++ b/src/interdomain/service/topology_abstractor/AbstractDevice.py
@@ -14,7 +14,7 @@
 
 import copy, logging
 from typing import Dict, Optional
-from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME
 from common.DeviceTypes import DeviceTypeEnum
 from common.proto.context_pb2 import (
     ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, EndPoint)
@@ -67,9 +67,9 @@ class AbstractDevice:
         is_datacenter = device_type_is_datacenter(self.__device_type)
         is_network = device_type_is_network(self.__device_type)
         if is_datacenter or is_network:
-            # Add abstract device to topologies [INTERDOMAIN_TOPOLOGY_UUID]
-            context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
-            topology_uuids = [INTERDOMAIN_TOPOLOGY_UUID]
+            # Add abstract device to topologies [INTERDOMAIN_TOPOLOGY_NAME]
+            context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+            topology_uuids = [INTERDOMAIN_TOPOLOGY_NAME]
             for topology_uuid in topology_uuids:
                 add_device_to_topology(self.__context_client, context_id, topology_uuid, self.__device_uuid)
 
@@ -80,7 +80,7 @@ class AbstractDevice:
         #        self.update_endpoints(dc_device)
         #elif is_network:
         #    devices_in_admin_topology = get_devices_in_topology(
-        #        self.__context_client, context_id, DEFAULT_TOPOLOGY_UUID)
+        #        self.__context_client, context_id, DEFAULT_TOPOLOGY_NAME)
         #    for device in devices_in_admin_topology:
         #        if device_type_is_datacenter(device.device_type): continue
         #        self.update_endpoints(device)
diff --git a/src/interdomain/service/topology_abstractor/AbstractLink.py b/src/interdomain/service/topology_abstractor/AbstractLink.py
index 7fe7b07b0708ebf8490cf4304646037973b05d56..552d40d41d8758a4b398e1d970b8fb600bed6a5b 100644
--- a/src/interdomain/service/topology_abstractor/AbstractLink.py
+++ b/src/interdomain/service/topology_abstractor/AbstractLink.py
@@ -14,7 +14,7 @@
 
 import copy, logging
 from typing import Dict, List, Optional, Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME
 from common.proto.context_pb2 import ContextId, EndPointId, Link, LinkId
 from common.tools.context_queries.Link import add_link_to_topology, get_existing_link_uuids
 from common.tools.object_factory.Context import json_context_id
@@ -67,9 +67,9 @@ class AbstractLink:
         else:
             self._load_existing()
 
-        # Add abstract link to topologies [INTERDOMAIN_TOPOLOGY_UUID]
-        context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
-        topology_uuids = [INTERDOMAIN_TOPOLOGY_UUID]
+        # Add abstract link to topologies [INTERDOMAIN_TOPOLOGY_NAME]
+        context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+        topology_uuids = [INTERDOMAIN_TOPOLOGY_NAME]
         for topology_uuid in topology_uuids:
             add_link_to_topology(self.__context_client, context_id, topology_uuid, self.__link_uuid)
 
diff --git a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py
index 5729fe733c3a9a8f73f188b40338160ab286998b..db104144eb867ccc451f01afe2c46dd9f76fac4a 100644
--- a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py
+++ b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py
@@ -14,7 +14,7 @@
 
 import logging, threading
 from typing import Dict, Optional, Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME
 from common.DeviceTypes import DeviceTypeEnum
 from common.proto.context_pb2 import (
     ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EndPoint, EndPointId, Link, LinkEvent, TopologyId,
@@ -39,8 +39,8 @@ from .Types import EventTypes
 
 LOGGER = logging.getLogger(__name__)
 
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
-INTERDOMAIN_TOPOLOGY_ID = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_UUID, context_id=ADMIN_CONTEXT_ID))
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+INTERDOMAIN_TOPOLOGY_ID = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_NAME, context_id=ADMIN_CONTEXT_ID))
 
 class TopologyAbstractor(threading.Thread):
     def __init__(self) -> None:
@@ -65,8 +65,8 @@ class TopologyAbstractor(threading.Thread):
 
     def run(self) -> None:
         self.context_client.connect()
-        create_context(self.context_client, DEFAULT_CONTEXT_UUID)
-        topology_uuids = [DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID]
+        create_context(self.context_client, DEFAULT_CONTEXT_NAME)
+        topology_uuids = [DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME]
         create_missing_topologies(self.context_client, ADMIN_CONTEXT_ID, topology_uuids)
 
         self.dlt_connector_client.connect()
@@ -96,7 +96,7 @@ class TopologyAbstractor(threading.Thread):
     #        context_uuid = event.topology_id.context_id.context_uuid.uuid
     #        if context_uuid != own_context_uuid: return True
     #        topology_uuid = event.topology_id.topology_uuid.uuid
-    #        if topology_uuid in {INTERDOMAIN_TOPOLOGY_UUID}: return True
+    #        if topology_uuid in {INTERDOMAIN_TOPOLOGY_NAME}: return True
     #
     #    return False
 
@@ -200,7 +200,7 @@ class TopologyAbstractor(threading.Thread):
         device_uuid = device.device_id.device_uuid.uuid
 
         interdomain_device_uuids = get_uuids_of_devices_in_topology(
-            self.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID)
+            self.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
 
         for endpoint in device.device_endpoints:
             if not endpoint_type_is_border(endpoint.endpoint_type): continue
@@ -236,8 +236,8 @@ class TopologyAbstractor(threading.Thread):
             topology_uuid = topology_id.topology_uuid.uuid
             context_id = topology_id.context_id
             context_uuid = context_id.context_uuid.uuid
-            topology_uuids = {DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID}
-            if (context_uuid == DEFAULT_CONTEXT_UUID) and (topology_uuid not in topology_uuids):
+            topology_uuids = {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME}
+            if (context_uuid == DEFAULT_CONTEXT_NAME) and (topology_uuid not in topology_uuids):
                 abstract_topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=ADMIN_CONTEXT_ID))
                 self._get_or_create_abstract_device(
                     topology_uuid, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id)
diff --git a/src/load_generator/.gitlab-ci.yml b/src/load_generator/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a63bd8d0ddcdcff086e6718e2a6ea7feeb337ea1
--- /dev/null
+++ b/src/load_generator/.gitlab-ci.yml
@@ -0,0 +1,39 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build load_generator:
+  variables:
+    IMAGE_NAME: 'load_generator' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
+    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
diff --git a/src/tests/tools/load_gen/__init__.py b/src/load_generator/Config.py
similarity index 100%
rename from src/tests/tools/load_gen/__init__.py
rename to src/load_generator/Config.py
diff --git a/src/load_generator/Dockerfile b/src/load_generator/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..8f59bb4dbc47d6180d96187eea20cc6e8cbce60c
--- /dev/null
+++ b/src/load_generator/Dockerfile
@@ -0,0 +1,77 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/load_generator
+WORKDIR /var/teraflow/load_generator
+COPY src/load_generator/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/context/__init__.py context/__init__.py
+COPY src/context/client/. context/client/
+COPY src/dlt/__init__.py dlt/__init__.py
+COPY src/dlt/connector/__init__.py dlt/connector/__init__.py
+COPY src/dlt/connector/. dlt/connector/
+COPY src/load_generator/. load_generator/
+COPY src/service/__init__.py service/__init__.py
+COPY src/service/client/. service/client/
+COPY src/slice/__init__.py slice/__init__.py
+COPY src/slice/client/. slice/client/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "load_generator.service"]
diff --git a/src/load_generator/README.md b/src/load_generator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e6b0397bf435abb91f5e7c463da32367eba142cf
--- /dev/null
+++ b/src/load_generator/README.md
@@ -0,0 +1,18 @@
+# Tool: Load Generator
+
+Simple tool to generate load on the ETSI TeraFlowSDN controller through requests that create services and slices.
+The tool can be executed from the command line or from the WebUI.
+
+## Example (Command Line):
+
+Deploy TeraFlowSDN controller with your specific settings:
+```(bash)
+cd ~/tfs-ctrl
+source my_deploy.sh
+./deploy.sh
+```
+
+Run the tool:
+```(bash)
+./src/load_generator/run.sh
+```
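The tool can also be driven programmatically. A hedged sketch mirroring the command-line entry point (load_generator/command/__main__.py); the parameter values and request types are illustrative:

```python
from apscheduler.schedulers.blocking import BlockingScheduler
from load_generator.load_gen.Constants import RequestType
from load_generator.load_gen.Parameters import Parameters
from load_generator.load_gen.RequestGenerator import RequestGenerator
from load_generator.load_gen.RequestScheduler import RequestScheduler

parameters = Parameters(
    num_requests=100,
    request_types=[RequestType.SERVICE_L2NM],  # illustrative
    offered_load=50.0, holding_time=10.0, dry_mode=True)

generator = RequestGenerator(parameters)
generator.initialize()

scheduler = RequestScheduler(parameters, generator, scheduler_class=BlockingScheduler)
scheduler.start()  # blocks until all requests have been scheduled
```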
diff --git a/src/device/service/database/__init__.py b/src/load_generator/__init__.py
similarity index 75%
rename from src/device/service/database/__init__.py
rename to src/load_generator/__init__.py
index c59423e79961c8503f4469d69c53946988cae24e..70a33251242c51f49140e596b8208a19dd5245f7 100644
--- a/src/device/service/database/__init__.py
+++ b/src/load_generator/__init__.py
@@ -12,5 +12,3 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# In-Memory database with a simplified representation of Context Database focused on the Device model.
-# Used as an internal configuration cache, for message validation, and message formatting purposes.
diff --git a/src/load_generator/client/LoadGeneratorClient.py b/src/load_generator/client/LoadGeneratorClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7e215802bdbbb8f52085291c57fd4b4c82335bb
--- /dev/null
+++ b/src/load_generator/client/LoadGeneratorClient.py
@@ -0,0 +1,60 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceStub
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 15
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class LoadGeneratorClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.LOAD_GENERATOR)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.LOAD_GENERATOR)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = LoadGeneratorServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def Start(self, request : Empty) -> Empty:
+        LOGGER.debug('Start request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.Start(request)
+        LOGGER.debug('Start result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def Stop(self, request : Empty) -> Empty:
+        LOGGER.debug('Stop request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.Stop(request)
+        LOGGER.debug('Stop result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
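A minimal usage sketch of the client; host and port default to the values resolved from the service settings:

```python
from common.proto.context_pb2 import Empty
from load_generator.client.LoadGeneratorClient import LoadGeneratorClient

client = LoadGeneratorClient()  # host/port resolved from service settings
client.Start(Empty())           # begin generating requests
# ... later ...
client.Stop(Empty())            # stop issuing new requests
client.close()
```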
diff --git a/src/load_generator/client/__init__.py b/src/load_generator/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/load_generator/client/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/load_generator/command/__init__.py b/src/load_generator/command/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/load_generator/command/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/tools/load_gen/__main__.py b/src/load_generator/command/__main__.py
similarity index 80%
rename from src/tests/tools/load_gen/__main__.py
rename to src/load_generator/command/__main__.py
index 9a5ea2b6949d1b6dd50d0a40407c6740bf266dd3..9f61fc0801017c1f917b4eafc5211e5361d33322 100644
--- a/src/tests/tools/load_gen/__main__.py
+++ b/src/load_generator/command/__main__.py
@@ -13,10 +13,11 @@
 # limitations under the License.
 
 import logging, sys
-from .Constants import RequestType
-from .Parameters import Parameters
-from .RequestGenerator import RequestGenerator
-from .RequestScheduler import RequestScheduler
+from apscheduler.schedulers.blocking import BlockingScheduler
+from load_generator.load_gen.Constants import RequestType
+from load_generator.load_gen.Parameters import Parameters
+from load_generator.load_gen.RequestGenerator import RequestGenerator
+from load_generator.load_gen.RequestScheduler import RequestScheduler
 
 logging.basicConfig(level=logging.INFO)
 LOGGER = logging.getLogger(__name__)
@@ -45,7 +46,7 @@ def main():
     generator.initialize()
 
     LOGGER.info('Running Schedule...')
-    scheduler = RequestScheduler(parameters, generator)
+    scheduler = RequestScheduler(parameters, generator, scheduler_class=BlockingScheduler)
     scheduler.start()
 
     LOGGER.info('Done!')
diff --git a/src/tests/tools/load_gen/Constants.py b/src/load_generator/load_gen/Constants.py
similarity index 100%
rename from src/tests/tools/load_gen/Constants.py
rename to src/load_generator/load_gen/Constants.py
diff --git a/src/tests/tools/load_gen/DltTools.py b/src/load_generator/load_gen/DltTools.py
similarity index 100%
rename from src/tests/tools/load_gen/DltTools.py
rename to src/load_generator/load_gen/DltTools.py
diff --git a/src/tests/tools/load_gen/Parameters.py b/src/load_generator/load_gen/Parameters.py
similarity index 94%
rename from src/tests/tools/load_gen/Parameters.py
rename to src/load_generator/load_gen/Parameters.py
index c74d18248c6000cd6da18d5c7e0e55ef2be41730..abe297039cb08c0397b9064aca81883a6de37d06 100644
--- a/src/tests/tools/load_gen/Parameters.py
+++ b/src/load_generator/load_gen/Parameters.py
@@ -17,7 +17,7 @@ from typing import List, Optional
 class Parameters:
     def __init__(
         self, num_requests : int, request_types : List[str], offered_load : Optional[float] = None,
-        inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None,
+        inter_arrival_time : Optional[float] = None, holding_time : Optional[float] = None, do_teardown : bool = True,
         dry_mode : bool = False, record_to_dlt : bool = False, dlt_domain_id : Optional[str] = None
     ) -> None:
         self._num_requests = num_requests
@@ -25,6 +25,7 @@ class Parameters:
         self._offered_load = offered_load
         self._inter_arrival_time = inter_arrival_time
         self._holding_time = holding_time
+        self._do_teardown = do_teardown
         self._dry_mode = dry_mode
         self._record_to_dlt = record_to_dlt
         self._dlt_domain_id = dlt_domain_id
@@ -58,6 +59,9 @@ class Parameters:
     @property
     def holding_time(self): return self._holding_time
 
+    @property
+    def do_teardown(self): return self._do_teardown
+
     @property
     def dry_mode(self): return self._dry_mode
 
diff --git a/src/tests/tools/load_gen/RequestGenerator.py b/src/load_generator/load_gen/RequestGenerator.py
similarity index 72%
rename from src/tests/tools/load_gen/RequestGenerator.py
rename to src/load_generator/load_gen/RequestGenerator.py
index d38291d380d044fa3b91a1b653ea47f6e917fe16..b7b1432f4fde59fc093f6dc8b9c6590cbcd77e49 100644
--- a/src/tests/tools/load_gen/RequestGenerator.py
+++ b/src/load_generator/load_gen/RequestGenerator.py
@@ -15,6 +15,7 @@
 import logging, json, random, threading
 from typing import Dict, Optional, Set, Tuple
 from common.proto.context_pb2 import Empty, TopologyId
+from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.Constraint import json_constraint_custom
 from common.tools.object_factory.ConfigRule import json_config_rule_set
 from common.tools.object_factory.Device import json_device_id
@@ -25,8 +26,8 @@ from common.tools.object_factory.Slice import json_slice
 from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from dlt.connector.client.DltConnectorClient import DltConnectorClient
-from tests.tools.load_gen.DltTools import record_device_to_dlt, record_link_to_dlt
 from .Constants import ENDPOINT_COMPATIBILITY, RequestType
+from .DltTools import record_device_to_dlt, record_link_to_dlt
 from .Parameters import Parameters
 
 LOGGER = logging.getLogger(__name__)
@@ -41,6 +42,9 @@ class RequestGenerator:
         self._endpoint_ids_to_types : Dict[Tuple[str, str], str] = dict()
         self._endpoint_types_to_ids : Dict[str, Set[Tuple[str, str]]] = dict()
 
+        self._device_data : Dict[str, Dict] = dict()
+        self._device_endpoint_data : Dict[str, Dict[str, Dict]] = dict()
+
     def initialize(self) -> None:
         with self._lock:
             self._available_device_endpoints.clear()
@@ -55,9 +59,14 @@ class RequestGenerator:
             devices = context_client.ListDevices(Empty())
             for device in devices.devices:
                 device_uuid = device.device_id.device_uuid.uuid
+                self._device_data[device_uuid] = grpc_message_to_json(device)
+
                 _endpoints = self._available_device_endpoints.setdefault(device_uuid, set())
                 for endpoint in device.device_endpoints:
                     endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+                    endpoints = self._device_endpoint_data.setdefault(device_uuid, dict())
+                    endpoints[endpoint_uuid] = grpc_message_to_json(endpoint)
+
                     endpoint_type = endpoint.endpoint_type
                     _endpoints.add(endpoint_uuid)
                     self._endpoint_ids_to_types.setdefault((device_uuid, endpoint_uuid), endpoint_type)
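Note: the generator now snapshots the full device and endpoint descriptors as JSON dicts during initialize(), so later steps can resolve human-readable names without re-querying the Context component. The lookup pattern this enables (a sketch):

    device_name   = self._device_data[device_uuid]['name']
    endpoint_name = self._device_endpoint_data[device_uuid][endpoint_uuid]['name']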
@@ -191,7 +200,8 @@ class RequestGenerator:
         dst_endpoint_types = {dst_endpoint_type} if request_type in {RequestType.SERVICE_TAPI} else None
 
         # identify excluded destination devices
-        exclude_device_uuids = {} if request_type in {RequestType.SERVICE_TAPI, RequestType.SERVICE_MW} else {src_device_uuid}
+        REQUESTTYPES_REUSING_DEVICES = {RequestType.SERVICE_TAPI, RequestType.SERVICE_MW}
+        exclude_device_uuids = set() if request_type in REQUESTTYPES_REUSING_DEVICES else {src_device_uuid}
 
         # choose feasible destination endpoint
         dst = self._use_device_endpoint(
@@ -218,26 +228,33 @@ class RequestGenerator:
             ]
             vlan_id = num_request % 1000
             circuit_id = '{:03d}'.format(vlan_id)
-            src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
-            dst_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
+
+            src_device_name = self._device_data[src_device_uuid]['name']
+            src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', '')))
+
+            dst_device_name = self._device_data[dst_device_uuid]['name']
+            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', '')))
+
             config_rules = [
                 json_config_rule_set('/settings', {
                     'mtu': 1512
                 }),
-                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
-                    'router_id': src_router_id,
-                    'sub_interface_index': vlan_id,
-                    'vlan_id': vlan_id,
-                    'remote_router': dst_router_id,
-                    'circuit_id': circuit_id,
-                }),
-                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
-                    'router_id': dst_router_id,
-                    'sub_interface_index': vlan_id,
-                    'vlan_id': vlan_id,
-                    'remote_router': src_router_id,
-                    'circuit_id': circuit_id,
-                }),
+                json_config_rule_set(
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                        'router_id': src_router_id,
+                        'sub_interface_index': vlan_id,
+                        'vlan_id': vlan_id,
+                        'remote_router': dst_router_id,
+                        'circuit_id': circuit_id,
+                    }),
+                json_config_rule_set(
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                        'router_id': dst_router_id,
+                        'sub_interface_index': vlan_id,
+                        'vlan_id': vlan_id,
+                        'remote_router': src_router_id,
+                        'circuit_id': circuit_id,
+                    }),
             ]
             return json_service_l2nm_planned(
                 request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
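Note: router IDs are now derived from the device name (e.g. 'R1') instead of its UUID; with opaque UUIDs, int(uuid.replace('R', '')) would raise ValueError. This change also fixes the old bug where dst_router_id was computed from the source device. The derivation, extracted for clarity (assumes the emulated 'R<n>' naming convention):

    def derive_router_id(device_name: str) -> str:
        # 'R1' -> '10.0.0.1'; assumes devices named 'R<n>'
        return '10.0.0.{:d}'.format(int(device_name.replace('R', '')))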
@@ -251,32 +268,41 @@ class RequestGenerator:
             bgp_as = 60000 + (num_request % 10000)
             bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333)
             route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id)
-            src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
-            dst_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
-            src_address_ip = '.'.join([src_device_uuid.replace('R', ''), '0'] + src_endpoint_uuid.split('/'))
-            dst_address_ip = '.'.join([dst_device_uuid.replace('R', ''), '0'] + dst_endpoint_uuid.split('/'))
+
+            src_device_name = self._device_data[src_device_uuid]['name']
+            src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
+            src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', '')))
+            src_address_ip = '.'.join([src_device_name.replace('R', ''), '0'] + src_endpoint_name.split('/'))
+
+            dst_device_name = self._device_data[dst_device_uuid]['name']
+            dst_endpoint_name = self._device_endpoint_data[dst_device_uuid][dst_endpoint_uuid]['name']
+            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', '')))
+            dst_address_ip = '.'.join([dst_device_name.replace('R', ''), '0'] + dst_endpoint_name.split('/'))
+
             config_rules = [
                 json_config_rule_set('/settings', {
                     'mtu'             : 1512,
                     'bgp_as'          : bgp_as,
                     'bgp_route_target': bgp_route_target,
                 }),
-                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
-                    'router_id'          : src_router_id,
-                    'route_distinguisher': route_distinguisher,
-                    'sub_interface_index': vlan_id,
-                    'vlan_id'            : vlan_id,
-                    'address_ip'         : src_address_ip,
-                    'address_prefix'     : 16,
-                }),
-                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
-                    'router_id'          : dst_router_id,
-                    'route_distinguisher': route_distinguisher,
-                    'sub_interface_index': vlan_id,
-                    'vlan_id'            : vlan_id,
-                    'address_ip'         : dst_address_ip,
-                    'address_prefix'     : 16,
-                }),
+                json_config_rule_set(
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                        'router_id'          : src_router_id,
+                        'route_distinguisher': route_distinguisher,
+                        'sub_interface_index': vlan_id,
+                        'vlan_id'            : vlan_id,
+                        'address_ip'         : src_address_ip,
+                        'address_prefix'     : 16,
+                    }),
+                json_config_rule_set(
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                        'router_id'          : dst_router_id,
+                        'route_distinguisher': route_distinguisher,
+                        'sub_interface_index': vlan_id,
+                        'vlan_id'            : vlan_id,
+                        'address_ip'         : dst_address_ip,
+                        'address_prefix'     : 16,
+                    }),
             ]
             return json_service_l3nm_planned(
                 request_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules)
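Note: endpoint IP addresses follow the same name-based scheme. A sketch of the derivation (assumes endpoint names like '1/2', so device 'R1' with endpoint '1/2' yields '1.0.1.2'):

    def derive_address_ip(device_name: str, endpoint_name: str) -> str:
        # 'R1', '1/2' -> '1.0.1.2'
        return '.'.join([device_name.replace('R', ''), '0'] + endpoint_name.split('/'))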
@@ -313,7 +339,8 @@ class RequestGenerator:
         src_device_uuid,src_endpoint_uuid = src
 
         # identify excluded destination devices
-        exclude_device_uuids = {} if request_type in {RequestType.SERVICE_TAPI, RequestType.SERVICE_MW} else {src_device_uuid}
+        REQUESTTYPES_REUSING_DEVICES = {RequestType.SERVICE_TAPI, RequestType.SERVICE_MW}
+        exclude_device_uuids = set() if request_type in REQUESTTYPES_REUSING_DEVICES else {src_device_uuid}
 
         # choose feasible destination endpoint
         dst = self._use_device_endpoint(request_uuid, request_type, exclude_device_uuids=exclude_device_uuids)
@@ -338,26 +365,33 @@ class RequestGenerator:
         if request_type == RequestType.SLICE_L2NM:
             vlan_id = num_request % 1000
             circuit_id = '{:03d}'.format(vlan_id)
-            src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
-            dst_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
+
+            src_device_name = self._device_data[src_device_uuid]['name']
+            src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', '')))
+
+            dst_device_name = self._device_data[dst_device_uuid]['name']
+            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', '')))
+
             config_rules = [
                 json_config_rule_set('/settings', {
                     'mtu': 1512
                 }),
-                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
-                    'router_id': src_router_id,
-                    'sub_interface_index': vlan_id,
-                    'vlan_id': vlan_id,
-                    'remote_router': dst_router_id,
-                    'circuit_id': circuit_id,
-                }),
-                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
-                    'router_id': dst_router_id,
-                    'sub_interface_index': vlan_id,
-                    'vlan_id': vlan_id,
-                    'remote_router': src_router_id,
-                    'circuit_id': circuit_id,
-                }),
+                json_config_rule_set(
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                        'router_id': src_router_id,
+                        'sub_interface_index': vlan_id,
+                        'vlan_id': vlan_id,
+                        'remote_router': dst_router_id,
+                        'circuit_id': circuit_id,
+                    }),
+                json_config_rule_set(
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                        'router_id': dst_router_id,
+                        'sub_interface_index': vlan_id,
+                        'vlan_id': vlan_id,
+                        'remote_router': src_router_id,
+                        'circuit_id': circuit_id,
+                    }),
             ]
 
         elif request_type == RequestType.SLICE_L3NM:
@@ -365,32 +399,41 @@ class RequestGenerator:
             bgp_as = 60000 + (num_request % 10000)
             bgp_route_target = '{:5d}:{:03d}'.format(bgp_as, 333)
             route_distinguisher = '{:5d}:{:03d}'.format(bgp_as, vlan_id)
-            src_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
-            dst_router_id = '10.0.0.{:d}'.format(int(src_device_uuid.replace('R', '')))
-            src_address_ip = '.'.join([src_device_uuid.replace('R', ''), '0'] + src_endpoint_uuid.split('/'))
-            dst_address_ip = '.'.join([dst_device_uuid.replace('R', ''), '0'] + dst_endpoint_uuid.split('/'))
+
+            src_device_name = self._device_data[src_device_uuid]['name']
+            src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
+            src_router_id = '10.0.0.{:d}'.format(int(src_device_name.replace('R', '')))
+            src_address_ip = '.'.join([src_device_name.replace('R', ''), '0'] + src_endpoint_name.split('/'))
+
+            dst_device_name = self._device_data[dst_device_uuid]['name']
+            dst_endpoint_name = self._device_endpoint_data[dst_device_uuid][dst_endpoint_uuid]['name']
+            dst_router_id = '10.0.0.{:d}'.format(int(dst_device_name.replace('R', '')))
+            dst_address_ip = '.'.join([dst_device_name.replace('R', ''), '0'] + dst_endpoint_name.split('/'))
+
             config_rules = [
                 json_config_rule_set('/settings', {
                     'mtu'             : 1512,
                     'bgp_as'          : bgp_as,
                     'bgp_route_target': bgp_route_target,
                 }),
-                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
-                    'router_id'          : src_router_id,
-                    'route_distinguisher': route_distinguisher,
-                    'sub_interface_index': vlan_id,
-                    'vlan_id'            : vlan_id,
-                    'address_ip'         : src_address_ip,
-                    'address_prefix'     : 16,
-                }),
-                json_config_rule_set('/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
-                    'router_id'          : dst_router_id,
-                    'route_distinguisher': route_distinguisher,
-                    'sub_interface_index': vlan_id,
-                    'vlan_id'            : vlan_id,
-                    'address_ip'         : dst_address_ip,
-                    'address_prefix'     : 16,
-                }),
+                json_config_rule_set(
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(src_device_uuid, src_endpoint_uuid), {
+                        'router_id'          : src_router_id,
+                        'route_distinguisher': route_distinguisher,
+                        'sub_interface_index': vlan_id,
+                        'vlan_id'            : vlan_id,
+                        'address_ip'         : src_address_ip,
+                        'address_prefix'     : 16,
+                    }),
+                json_config_rule_set(
+                    '/device[{:s}]/endpoint[{:s}]/settings'.format(dst_device_uuid, dst_endpoint_uuid), {
+                        'router_id'          : dst_router_id,
+                        'route_distinguisher': route_distinguisher,
+                        'sub_interface_index': vlan_id,
+                        'vlan_id'            : vlan_id,
+                        'address_ip'         : dst_address_ip,
+                        'address_prefix'     : 16,
+                    }),
             ]
 
         return json_slice(
diff --git a/src/tests/tools/load_gen/RequestScheduler.py b/src/load_generator/load_gen/RequestScheduler.py
similarity index 93%
rename from src/tests/tools/load_gen/RequestScheduler.py
rename to src/load_generator/load_gen/RequestScheduler.py
index eafb95c30032e69ab4f2f7874656b11db4f6817f..e2a804d7f73c584d3d71bf7801a330705712eeae 100644
--- a/src/tests/tools/load_gen/RequestScheduler.py
+++ b/src/load_generator/load_gen/RequestScheduler.py
@@ -31,8 +31,10 @@ logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
 LOGGER = logging.getLogger(__name__)
 
 class RequestScheduler:
-    def __init__(self, parameters : Parameters, generator : RequestGenerator) -> None:
-        self._scheduler = BlockingScheduler()
+    def __init__(
+        self, parameters : Parameters, generator : RequestGenerator, scheduler_class=BlockingScheduler
+    ) -> None:
+        self._scheduler = scheduler_class()
         self._scheduler.configure(
             jobstores = {'default': MemoryJobStore()},
             executors = {'default': ThreadPoolExecutor(max_workers=10)},
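Note: the scheduler class is now injected instead of hard-coded. The standalone runner keeps the default BlockingScheduler (start() blocks until the run ends), while the gRPC service below passes APScheduler's BackgroundScheduler so that its Start() RPC can return immediately. A sketch of both modes:

    from apscheduler.schedulers.background import BackgroundScheduler

    # Blocking mode (standalone): start() returns only on shutdown
    RequestScheduler(parameters, generator).start()

    # Background mode (gRPC service): jobs run on a separate thread
    scheduler = RequestScheduler(parameters, generator, scheduler_class=BackgroundScheduler)
    scheduler.start()
    # ... later ...
    scheduler.stop()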
@@ -46,7 +48,9 @@ class RequestScheduler:
         self._generator = generator
 
     def _schedule_request_setup(self) -> None:
-        if self._generator.num_requests_generated >= self._parameters.num_requests:
+        infinite_loop = self._parameters.num_requests == 0
+        num_requests_generated = self._generator.num_requests_generated - 1 # the counter is incremented before this check runs; compensate by one
+        if not infinite_loop and (num_requests_generated >= self._parameters.num_requests):
             LOGGER.info('Generation Done!')
             #self._scheduler.shutdown()
             return
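Note: num_requests == 0 now means "run until stopped" rather than "generate nothing". A sketch of requesting an endless run (values are illustrative):

    # Sketch: endless generation, terminated only via RequestScheduler.stop()
    parameters = Parameters(
        num_requests=0, request_types=[RequestType.SERVICE_L3NM],
        offered_load=50, holding_time=10)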
@@ -65,6 +69,9 @@ class RequestScheduler:
         self._schedule_request_setup()
         self._scheduler.start()
 
+    def stop(self):
+        self._scheduler.shutdown()
+
     def _request_setup(self) -> None:
         self._schedule_request_setup()
 
@@ -93,7 +100,8 @@ class RequestScheduler:
                 slice_uuid, src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid)
             self._create_update(slice_=request)
 
-        self._schedule_request_teardown(request)
+        if self._parameters.do_teardown:
+            self._schedule_request_teardown(request)
 
     def _request_teardown(self, request : Dict) -> None:
         if 'service_id' in request:
diff --git a/src/load_generator/load_gen/__init__.py b/src/load_generator/load_gen/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/load_generator/load_gen/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/load_generator/requirements.in b/src/load_generator/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..61a0a0efbeb0d2295df8d8dacdee3f7f1235f80a
--- /dev/null
+++ b/src/load_generator/requirements.in
@@ -0,0 +1,15 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+APScheduler==3.8.1
diff --git a/src/tests/tools/load_gen/run.sh b/src/load_generator/run.sh
similarity index 90%
rename from src/tests/tools/load_gen/run.sh
rename to src/load_generator/run.sh
index b16808ab6905927728212185681e2a6d4a5135ba..35db1ad4db965a7d3dfbdfe1c114c8bc0df39e2f 100755
--- a/src/tests/tools/load_gen/run.sh
+++ b/src/load_generator/run.sh
@@ -13,5 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Use this script to run the load generator standalone from the command line
+
 source tfs_runtime_env_vars.sh
-python -m tests.tools.load_gen
+python -m load_generator.command
diff --git a/src/load_generator/service/LoadGeneratorService.py b/src/load_generator/service/LoadGeneratorService.py
new file mode 100644
index 0000000000000000000000000000000000000000..0127e5f86f9f46e50ec5c15c65997255f5587d88
--- /dev/null
+++ b/src/load_generator/service/LoadGeneratorService.py
@@ -0,0 +1,28 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.proto.load_generator_pb2_grpc import add_LoadGeneratorServiceServicer_to_server
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from .LoadGeneratorServiceServicerImpl import LoadGeneratorServiceServicerImpl
+
+class LoadGeneratorService(GenericGrpcService):
+    def __init__(self, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.LOAD_GENERATOR)
+        super().__init__(port, cls_name=cls_name)
+        self.load_generator_servicer = LoadGeneratorServiceServicerImpl()
+
+    def install_servicers(self):
+        add_LoadGeneratorServiceServicer_to_server(self.load_generator_servicer, self.server)
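Note: this follows the repository's GenericGrpcService pattern: the subclass resolves its gRPC port from ServiceNameEnum and registers its servicers in install_servicers(), which the base class invokes when the server is started. A sketch of the resulting lifecycle:

    # Sketch: lifecycle of a GenericGrpcService subclass
    service = LoadGeneratorService()
    service.start()   # creates the gRPC server and calls install_servicers()
    # ... serve ...
    service.stop()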
diff --git a/src/load_generator/service/LoadGeneratorServiceServicerImpl.py b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..67158f1bffc53a2a8ba095344ec0e593d6597085
--- /dev/null
+++ b/src/load_generator/service/LoadGeneratorServiceServicerImpl.py
@@ -0,0 +1,64 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+import grpc, logging
+from apscheduler.schedulers.background import BackgroundScheduler
+from common.proto.context_pb2 import Empty
+from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceServicer
+from load_generator.load_gen.Constants import RequestType
+from load_generator.load_gen.Parameters import Parameters
+from load_generator.load_gen.RequestGenerator import RequestGenerator
+from load_generator.load_gen.RequestScheduler import RequestScheduler
+
+LOGGER = logging.getLogger(__name__)
+
+class LoadGeneratorServiceServicerImpl(LoadGeneratorServiceServicer):
+    def __init__(self):
+        LOGGER.debug('Creating Servicer...')
+        self._parameters = Parameters(
+            num_requests = 100,
+            request_types = [
+                RequestType.SERVICE_L2NM,
+                RequestType.SERVICE_L3NM,
+                #RequestType.SERVICE_MW,
+                #RequestType.SERVICE_TAPI,
+                RequestType.SLICE_L2NM,
+                RequestType.SLICE_L3NM,
+            ],
+            offered_load  = 50,
+            holding_time  = 10,
+            do_teardown   = True,
+            dry_mode      = False,           # in dry mode, no request is sent to TeraFlowSDN
+            record_to_dlt = False,           # if record_to_dlt, changes in device/link/service/slice are uploaded to DLT
+            dlt_domain_id = 'dlt-perf-eval', # domain used to upload entities; ignored when record_to_dlt = False
+        )
+        self._generator : Optional[RequestGenerator] = None
+        self._scheduler : Optional[RequestScheduler] = None
+        LOGGER.debug('Servicer Created')
+
+    def Start(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+        LOGGER.info('Initializing Generator...')
+        self._generator = RequestGenerator(self._parameters)
+        self._generator.initialize()
+
+        LOGGER.info('Running Schedule...')
+        self._scheduler = RequestScheduler(self._parameters, self._generator, scheduler_class=BackgroundScheduler)
+        self._scheduler.start()
+        return Empty()
+
+    def Stop(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+        if self._scheduler is not None:
+            self._scheduler.stop()
+        return Empty()
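Note: with the servicer in place, any component can drive the load generator remotely. A hypothetical client sketch (the stub name follows the usual grpcio code-generation convention; the address and port are assumptions):

    import grpc
    from common.proto.context_pb2 import Empty
    from common.proto.load_generator_pb2_grpc import LoadGeneratorServiceStub

    channel = grpc.insecure_channel('load-generatorservice:50052')  # address/port assumed
    stub = LoadGeneratorServiceStub(channel)
    stub.Start(Empty())   # begins generating requests in the background
    # ... run the experiment ...
    stub.Stop(Empty())    # shuts down the scheduler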
diff --git a/src/load_generator/service/__init__.py b/src/load_generator/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/load_generator/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/load_generator/service/__main__.py b/src/load_generator/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f49ee244532f6e035a35f7e5e1d02f0d9a1767d
--- /dev/null
+++ b/src/load_generator/service/__main__.py
@@ -0,0 +1,64 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level,
+    wait_for_environment_variables)
+from .LoadGeneratorService import LoadGeneratorService
+
+log_level = get_log_level()
+logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+LOGGER = logging.getLogger(__name__)
+
+terminate = threading.Event()
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    wait_for_environment_variables([
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+    ])
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Starting load generator service
+    grpc_service = LoadGeneratorService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=0.1): pass
+
+    scheduler = grpc_service.load_generator_servicer._scheduler
+    if scheduler is not None: scheduler.stop()
+
+    LOGGER.info('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
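Note: the entry point uses the usual TFS shutdown pattern: signal handlers set a threading.Event, the main thread polls it, and teardown happens in order (request scheduler first, then the gRPC server). Reduced to its essence (a sketch):

    import signal, threading

    terminate = threading.Event()
    signal.signal(signal.SIGTERM, lambda *_: terminate.set())

    # start services, then wait for Ctrl+C / SIGTERM
    while not terminate.wait(timeout=0.1): pass
    # stop the request scheduler before stopping the gRPC server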
diff --git a/src/load_generator/tests/__init__.py b/src/load_generator/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/load_generator/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/tools/load_gen/deploy_specs.sh b/src/load_generator/tests/deploy_specs.sh
similarity index 95%
rename from src/tests/tools/load_gen/deploy_specs.sh
rename to src/load_generator/tests/deploy_specs.sh
index a688f1c0ad920bab2fb5157dce72225671ed837e..a5af70b04a84ffa83b0e19da005175f0291c4f93 100644
--- a/src/tests/tools/load_gen/deploy_specs.sh
+++ b/src/load_generator/tests/deploy_specs.sh
@@ -7,7 +7,7 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
 #   interdomain slice pathcomp dlt
 #   dbscanserving opticalattackmitigator opticalattackdetector
 #   l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
-export TFS_COMPONENTS="context device pathcomp service slice webui" # automation monitoring compute dlt
+export TFS_COMPONENTS="context device pathcomp service slice webui load_generator" # automation monitoring compute dlt
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
diff --git a/src/tests/tools/load_gen/descriptors.json b/src/load_generator/tests/descriptors.json
similarity index 100%
rename from src/tests/tools/load_gen/descriptors.json
rename to src/load_generator/tests/descriptors.json
diff --git a/src/tests/tools/load_gen/test_dlt_functional.py b/src/load_generator/tests/test_dlt_functional.py
similarity index 100%
rename from src/tests/tools/load_gen/test_dlt_functional.py
rename to src/load_generator/tests/test_dlt_functional.py
diff --git a/src/monitoring/.gitlab-ci.yml b/src/monitoring/.gitlab-ci.yml
index ef3a8c39a045dd059f8a7942223bdc20775ae92c..4a981cba23f5197c08e673afe292505d38577982 100644
--- a/src/monitoring/.gitlab-ci.yml
+++ b/src/monitoring/.gitlab-ci.yml
@@ -39,7 +39,7 @@ build monitoring:
       - .gitlab-ci.yml
 
 # Apply unit test to the component
-unit test monitoring:
+unit_test monitoring:
   variables:
     IMAGE_NAME: 'monitoring' # name of the microservice
     IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
@@ -84,28 +84,28 @@ unit test monitoring:
       reports:
         junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
 
-# Deployment of the service in Kubernetes Cluster
-deploy monitoring:
-  variables:
-    IMAGE_NAME: 'monitoring' # name of the microservice
-    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
-  stage: deploy
-  needs:
-    - unit test monitoring
-    # - integ_test execute
-  script:
-    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
-    - kubectl version
-    - kubectl get all
-    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
-    - kubectl get all
-  # environment:
-  #   name: test
-  #   url: https://example.com
-  #   kubernetes:
-  #     namespace: test
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-      when: manual    
-    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
-      when: manual
+## Deployment of the service in Kubernetes Cluster
+#deploy monitoring:
+#  variables:
+#    IMAGE_NAME: 'monitoring' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit test monitoring
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual    
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/monitoring/requirements.in b/src/monitoring/requirements.in
index c07f0c8f4079482a20a138d190004fa314fc9860..fd7a555cf5fa48b3a976feb51613935667943448 100644
--- a/src/monitoring/requirements.in
+++ b/src/monitoring/requirements.in
@@ -1,6 +1,6 @@
 anytree==2.8.0
 APScheduler==3.8.1
-fastcache==1.1.0
+#fastcache==1.1.0
 #google-api-core
 #opencensus[stackdriver]
 #google-cloud-profiler
@@ -13,14 +13,8 @@ influx-line-protocol==0.1.4
 python-dateutil==2.8.2
 python-json-logger==2.0.2
 pytz==2021.3
-redis==4.1.2
+#redis==4.1.2
 requests==2.27.1
 xmltodict==0.12.0
 questdb==1.0.1
 psycopg2-binary==2.9.3
-
-# pip's dependency resolver does not take into account installed packages.
-# p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install latest one
-# adding here again grpcio==1.47.* and protobuf==3.20.* with explicit versions to prevent collisions
-grpcio==1.47.*
-protobuf==3.20.*
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index b113f5a7822841e17274300dc7102664bce1c409..e70827cbc453d7ab754cddb6d7c2471a5f4c5bae 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -12,58 +12,47 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy, os, pytest
-import threading
-import time
-from queue import Queue
+import copy, os, pytest #, threading, time
+import logging
+#from queue import Queue
 from random import random
 from time import sleep
-from typing import Tuple
-
+from typing import Union #, Tuple
 from apscheduler.executors.pool import ProcessPoolExecutor
 from apscheduler.schedulers.background import BackgroundScheduler
 from apscheduler.schedulers.base import STATE_STOPPED
 from grpc._channel import _MultiThreadedRendezvous
-
 from common.Constants import ServiceNameEnum
 from common.Settings import (
     ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
-from common.logger import getJSONLogger
-from common.orm.Database import Database
-from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum
-from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
-from common.message_broker.MessageBroker import MessageBroker
+#from common.logger import getJSONLogger
 from common.proto import monitoring_pb2
+from common.proto.context_pb2 import EventTypeEnum, DeviceEvent, Device, Empty
+from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
-from common.proto.monitoring_pb2 import KpiId, KpiDescriptor, KpiList, SubsDescriptor, SubsList, AlarmID, \
-    AlarmDescriptor, AlarmList, Kpi, KpiDescriptorList, SubsResponse, AlarmResponse, RawKpiTable
-from common.tools.timestamp.Converters import timestamp_utcnow_to_float, timestamp_string_to_float
-
+from common.proto.monitoring_pb2 import KpiId, KpiDescriptor, SubsDescriptor, SubsList, AlarmID, \
+    AlarmDescriptor, AlarmList, KpiDescriptorList, SubsResponse, AlarmResponse, RawKpiTable #, Kpi, KpiList
+from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.tools.timestamp.Converters import timestamp_utcnow_to_float #, timestamp_string_to_float
 from context.client.ContextClient import ContextClient
-from context.service.grpc_server.ContextService import ContextService
-from common.proto.context_pb2 import EventTypeEnum, DeviceEvent, Device, Empty
-
 from device.client.DeviceClient import DeviceClient
 from device.service.DeviceService import DeviceService
 from device.service.driver_api.DriverFactory import DriverFactory
 from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
-from monitoring.service.AlarmManager import AlarmManager
-from monitoring.service.MetricsDBTools import MetricsDB
-from monitoring.service.SubscriptionManager import SubscriptionManager
-
-os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
-from device.service.drivers import DRIVERS  # pylint: disable=wrong-import-position
-
 from monitoring.client.MonitoringClient import MonitoringClient
 from monitoring.service import ManagementDBTools, MetricsDBTools
-from monitoring.service.MonitoringService import MonitoringService
+#from monitoring.service.AlarmManager import AlarmManager
 from monitoring.service.EventTools import EventsDeviceCollector
+from monitoring.service.MetricsDBTools import MetricsDB
+from monitoring.service.MonitoringService import MonitoringService
+#from monitoring.service.SubscriptionManager import SubscriptionManager
 from monitoring.tests.Messages import create_kpi_request, include_kpi_request, monitor_kpi_request, \
-    create_kpi_request_b, create_kpi_request_c, kpi_query, subs_descriptor, alarm_descriptor, \
-    alarm_subscription
+    create_kpi_request_c, kpi_query, subs_descriptor, alarm_descriptor, alarm_subscription #, create_kpi_request_b
 from monitoring.tests.Objects import DEVICE_DEV1, DEVICE_DEV1_CONNECT_RULES, DEVICE_DEV1_UUID
 
-from monitoring.service.MonitoringServiceServicerImpl import LOGGER
+os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
+from device.service.drivers import DRIVERS  # pylint: disable=wrong-import-position,ungrouped-imports
 
 
 ###########################
@@ -71,49 +60,51 @@ from monitoring.service.MonitoringServiceServicerImpl import LOGGER
 ###########################
 
 LOCAL_HOST = '127.0.0.1'
-
-CONTEXT_SERVICE_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.CONTEXT) # avoid privileged ports
+MOCKSERVICE_PORT = 10000
 os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
-os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(CONTEXT_SERVICE_PORT)
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(MOCKSERVICE_PORT)
 
-DEVICE_SERVICE_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.DEVICE) # avoid privileged ports
+DEVICE_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.DEVICE) # avoid privileged ports
 os.environ[get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
 os.environ[get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(DEVICE_SERVICE_PORT)
 
-MONITORING_SERVICE_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.MONITORING) # avoid privileged ports
+MONITORING_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.MONITORING) # avoid privileged ports
 os.environ[get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
 os.environ[get_env_var_name(ServiceNameEnum.MONITORING, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(MONITORING_SERVICE_PORT)
 
-METRICSDB_HOSTNAME = os.environ.get("METRICSDB_HOSTNAME")
-METRICSDB_ILP_PORT = os.environ.get("METRICSDB_ILP_PORT")
-METRICSDB_REST_PORT = os.environ.get("METRICSDB_REST_PORT")
-METRICSDB_TABLE = os.environ.get("METRICSDB_TABLE")
+METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME')
+METRICSDB_ILP_PORT = os.environ.get('METRICSDB_ILP_PORT')
+METRICSDB_REST_PORT = os.environ.get('METRICSDB_REST_PORT')
+METRICSDB_TABLE = os.environ.get('METRICSDB_TABLE')
 
+LOGGER = logging.getLogger(__name__)
 
-@pytest.fixture(scope='session')
-def context_db_mb() -> Tuple[Database, MessageBroker]:
-    _database = Database(get_database_backend(backend=DatabaseBackendEnum.INMEMORY))
-    _message_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY))
-    yield _database, _message_broker
-    _message_broker.terminate()
+class MockContextService(GenericGrpcService):
+    # Mock Service implementing Context to simplify unitary tests of Monitoring
+
+    def __init__(self, bind_port: Union[str, int]) -> None:
+        super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')
+
+    # pylint: disable=attribute-defined-outside-init
+    def install_servicers(self):
+        self.context_servicer = MockServicerImpl_Context()
+        add_ContextServiceServicer_to_server(self.context_servicer, self.server)
 
 @pytest.fixture(scope='session')
-def context_service(context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name
-    database, message_broker = context_db_mb
-    database.clear_all()
-    _service = ContextService(database, message_broker)
+def context_service():
+    _service = MockContextService(MOCKSERVICE_PORT)
     _service.start()
     yield _service
     _service.stop()
 
 @pytest.fixture(scope='session')
-def context_client(context_service : ContextService): # pylint: disable=redefined-outer-name
+def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
     _client = ContextClient()
     yield _client
     _client.close()
 
 @pytest.fixture(scope='session')
-def device_service(context_service : ContextService): # pylint: disable=redefined-outer-name
+def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
     LOGGER.info('Initializing DeviceService...')
     driver_factory = DriverFactory(DRIVERS)
     driver_instance_cache = DriverInstanceCache(driver_factory)
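Note: the tests no longer need a real ContextService backed by an in-memory Database/MessageBroker; serving MockServicerImpl_Context through GenericGrpcService is sufficient, which is why the database set-up/dump boilerplate disappears from the test bodies below. The resulting fixture dependency chain:

    # context_service (MockContextService)
    #   -> context_client, device_service
    #        -> monitoring_service -> monitoring_client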
@@ -128,7 +119,7 @@ def device_service(context_service : ContextService): # pylint: disable=redefine
     _service.stop()
 
 @pytest.fixture(scope='session')
-def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name
+def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument
     _client = DeviceClient()
     yield _client
     _client.close()
@@ -136,8 +127,8 @@ def device_client(device_service : DeviceService): # pylint: disable=redefined-o
 # This fixture will be requested by test cases and last during testing session
 @pytest.fixture(scope='session')
 def monitoring_service(
-        context_service : ContextService,  # pylint: disable=redefined-outer-name
-        device_service : DeviceService     # pylint: disable=redefined-outer-name
+        context_service : MockContextService,  # pylint: disable=redefined-outer-name,unused-argument
+        device_service : DeviceService     # pylint: disable=redefined-outer-name,unused-argument
     ):
     LOGGER.info('Initializing MonitoringService...')
     _service = MonitoringService()
@@ -153,7 +144,7 @@ def monitoring_service(
 # This fixture will be requested by test cases and last during testing session.
 # The client requires the server, so client fixture has the server as dependency.
 @pytest.fixture(scope='session')
-def monitoring_client(monitoring_service : MonitoringService): # pylint: disable=redefined-outer-name
+def monitoring_client(monitoring_service : MonitoringService): # pylint: disable=redefined-outer-name,unused-argument
     LOGGER.info('Initializing MonitoringClient...')
     _client = MonitoringClient()
 
@@ -183,10 +174,13 @@ def subs_scheduler():
     return _scheduler
 
 def ingestion_data(kpi_id_int):
-    metrics_db = MetricsDB("localhost", "9009", "9000", "monitoring")
+    # pylint: disable=redefined-outer-name,unused-argument
+    metrics_db = MetricsDB('localhost', '9009', '9000', 'monitoring')
 
-    for i in range(50):
-        kpiSampleType   = KpiSampleType.Name(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED).upper().replace('KPISAMPLETYPE_', '')
+    kpiSampleType = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    kpiSampleType_name = KpiSampleType.Name(kpiSampleType).upper().replace('KPISAMPLETYPE_', '')
+    for _ in range(50):
+        kpiSampleType   = kpiSampleType_name
         kpiId           = kpi_id_int
         deviceId        = 'DEV'+ str(kpi_id_int)
         endpointId      = 'END' + str(kpi_id_int)
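Note: the enum-to-name conversion is loop-invariant, so it is hoisted out of the loop. For reference, the protobuf enum helper involved (a sketch):

    from common.proto.kpi_sample_types_pb2 import KpiSampleType

    name = KpiSampleType.Name(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED)
    # -> 'KPISAMPLETYPE_PACKETS_RECEIVED'; strip the prefix for the MetricsDB field
    field = name.upper().replace('KPISAMPLETYPE_', '')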
@@ -250,32 +244,18 @@ def test_include_kpi(monitoring_client): # pylint: disable=redefined-outer-name
 
 # Test case that makes use of client fixture to test server's MonitorKpi method
 def test_monitor_kpi(
-        context_client : ContextClient,                 # pylint: disable=redefined-outer-name
+        context_client : ContextClient,                 # pylint: disable=redefined-outer-name,unused-argument
         device_client : DeviceClient,                   # pylint: disable=redefined-outer-name
         monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]  # pylint: disable=redefined-outer-name
     ):
     LOGGER.info('test_monitor_kpi begin')
 
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
-    # ----- Dump state of database before create the object ------------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
     # ----- Update the object ------------------------------------------------------------------------------------------
     LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID))
     device_with_connect_rules = copy.deepcopy(DEVICE_DEV1)
     device_with_connect_rules['device_config']['config_rules'].extend(DEVICE_DEV1_CONNECT_RULES)
-    response = device_client.AddDevice(Device(**device_with_connect_rules))
-    assert response.device_uuid.uuid == DEVICE_DEV1_UUID
+    device_id = device_client.AddDevice(Device(**device_with_connect_rules))
+    assert device_id.device_uuid.uuid == DEVICE_DEV1_UUID
 
     response = monitoring_client.SetKpi(create_kpi_request('1'))
     _monitor_kpi_request = monitor_kpi_request(response.kpi_id.uuid, 120, 5) # pylint: disable=maybe-no-member
@@ -283,6 +263,8 @@ def test_monitor_kpi(
     LOGGER.debug(str(response))
     assert isinstance(response, Empty)
 
+    device_client.DeleteDevice(device_id)
+
 # Test case that makes use of client fixture to test server's QueryKpiData method
 def test_query_kpi_data(monitoring_client,subs_scheduler): # pylint: disable=redefined-outer-name
 
@@ -313,7 +295,7 @@ def test_set_kpi_subscription(monitoring_client,subs_scheduler): # pylint: disab
         subs_scheduler.shutdown()
 
 # Test case that makes use of client fixture to test server's GetSubsDescriptor method
-def test_get_subs_descriptor(monitoring_client):
+def test_get_subs_descriptor(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_get_subs_descriptor')
     kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
     monitoring_client.IncludeKpi(include_kpi_request(kpi_id))
@@ -324,14 +306,14 @@ def test_get_subs_descriptor(monitoring_client):
         assert isinstance(response, SubsDescriptor)
 
 # Test case that makes use of client fixture to test server's GetSubscriptions method
-def test_get_subscriptions(monitoring_client):
+def test_get_subscriptions(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_get_subscriptions')
     response = monitoring_client.GetSubscriptions(Empty())
     LOGGER.debug(response)
     assert isinstance(response, SubsList)
 
 # Test case that makes use of client fixture to test server's DeleteSubscription method
-def test_delete_subscription(monitoring_client):
+def test_delete_subscription(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_delete_subscription')
     kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
     monitoring_client.IncludeKpi(include_kpi_request(kpi_id))
@@ -341,7 +323,7 @@ def test_delete_subscription(monitoring_client):
         assert isinstance(response, Empty)
 
 # Test case that makes use of client fixture to test server's SetKpiAlarm method
-def test_set_kpi_alarm(monitoring_client):
+def test_set_kpi_alarm(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_set_kpi_alarm')
     kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
     response = monitoring_client.SetKpiAlarm(alarm_descriptor(kpi_id))
@@ -349,14 +331,14 @@ def test_set_kpi_alarm(monitoring_client):
     assert isinstance(response, AlarmID)
 
 # Test case that makes use of client fixture to test server's GetAlarms method
-def test_get_alarms(monitoring_client):
+def test_get_alarms(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_get_alarms')
     response = monitoring_client.GetAlarms(Empty())
     LOGGER.debug(response)
     assert isinstance(response, AlarmList)
 
 # Test case that makes use of client fixture to test server's GetAlarmDescriptor method
-def test_get_alarm_descriptor(monitoring_client):
+def test_get_alarm_descriptor(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_get_alarm_descriptor')
     _kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
     _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id))
@@ -365,7 +347,7 @@ def test_get_alarm_descriptor(monitoring_client):
     assert isinstance(_response, AlarmDescriptor)
 
 # Test case that makes use of client fixture to test server's GetAlarmResponseStream method
-def test_get_alarm_response_stream(monitoring_client,subs_scheduler):
+def test_get_alarm_response_stream(monitoring_client,subs_scheduler): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_get_alarm_response_stream')
     _kpi_id = monitoring_client.SetKpi(create_kpi_request('3'))
     _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id))
@@ -380,7 +362,7 @@ def test_get_alarm_response_stream(monitoring_client,subs_scheduler):
         subs_scheduler.shutdown()
 
 # Test case that makes use of client fixture to test server's DeleteAlarm method
-def test_delete_alarm(monitoring_client):
+def test_delete_alarm(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_delete_alarm')
     _kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
     _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id))
@@ -408,15 +390,17 @@ def test_get_stream_kpi(monitoring_client): # pylint: disable=redefined-outer-na
 def test_managementdb_tools_kpis(management_db): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_managementdb_tools_kpis begin')
     _create_kpi_request = create_kpi_request('5')
-    kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
-    kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
-    kpi_device_id   = _create_kpi_request.device_id.device_uuid.uuid     # pylint: disable=maybe-no-member
-    kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
-    kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
-    kpi_slice_id = _create_kpi_request.slice_id.slice_uuid.uuid
-    kpi_connection_id  = _create_kpi_request.connection_id.connection_uuid.uuid
-
-    _kpi_id = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id,kpi_slice_id,kpi_connection_id)
+    kpi_description    = _create_kpi_request.kpi_description                    # pylint: disable=maybe-no-member
+    kpi_sample_type    = _create_kpi_request.kpi_sample_type                    # pylint: disable=maybe-no-member
+    kpi_device_id      = _create_kpi_request.device_id.device_uuid.uuid         # pylint: disable=maybe-no-member
+    kpi_endpoint_id    = _create_kpi_request.endpoint_id.endpoint_uuid.uuid     # pylint: disable=maybe-no-member
+    kpi_service_id     = _create_kpi_request.service_id.service_uuid.uuid       # pylint: disable=maybe-no-member
+    kpi_slice_id       = _create_kpi_request.slice_id.slice_uuid.uuid           # pylint: disable=maybe-no-member
+    kpi_connection_id  = _create_kpi_request.connection_id.connection_uuid.uuid # pylint: disable=maybe-no-member
+
+    _kpi_id = management_db.insert_KPI(
+        kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id,
+        kpi_slice_id, kpi_connection_id)
     assert isinstance(_kpi_id, int)
 
     response = management_db.get_KPI(_kpi_id)
@@ -517,30 +501,16 @@ def test_managementdb_tools_insert_alarm(management_db):
 #     assert total_points != 0
 
 def test_events_tools(
-        context_client : ContextClient,                 # pylint: disable=redefined-outer-name
+        context_client : ContextClient,                 # pylint: disable=redefined-outer-name,unused-argument
         device_client : DeviceClient,                   # pylint: disable=redefined-outer-name
-        monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]  # pylint: disable=redefined-outer-name
+        monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name,unused-argument
     ):
     LOGGER.warning('test_events_tools begin')
 
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
     # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
     events_collector = EventsDeviceCollector()
     events_collector.start()
 
-    # ----- Dump state of database before create the object ------------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
     # ----- Update the object ------------------------------------------------------------------------------------------
     LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID))
     device_with_connect_rules = copy.deepcopy(DEVICE_DEV1)
@@ -548,35 +518,22 @@ def test_events_tools(
     response = device_client.AddDevice(Device(**device_with_connect_rules))
     assert response.device_uuid.uuid == DEVICE_DEV1_UUID
 
+    device_client.DeleteDevice(response)
     events_collector.stop()
 
 
 def test_get_device_events(
-        context_client : ContextClient,                 # pylint: disable=redefined-outer-name
+        context_client : ContextClient,                 # pylint: disable=redefined-outer-name,unused-argument
         device_client : DeviceClient,                   # pylint: disable=redefined-outer-name
-        monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]  # pylint: disable=redefined-outer-name
+        monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name,unused-argument
     ):
 
     LOGGER.warning('test_get_device_events begin')
 
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
     # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
     events_collector = EventsDeviceCollector()
     events_collector.start()
 
-    # ----- Dump state of database before create the object ------------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
     # ----- Check create event -----------------------------------------------------------------------------------------
     LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID))
     device_with_connect_rules = copy.deepcopy(DEVICE_DEV1)
@@ -589,43 +546,31 @@ def test_get_device_events(
     assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
     assert event.device_id.device_uuid.uuid == DEVICE_DEV1_UUID
 
+    device_client.DeleteDevice(response)
     events_collector.stop()
 
 def test_listen_events(
-        context_client : ContextClient,                 # pylint: disable=redefined-outer-name
+        context_client : ContextClient,                 # pylint: disable=redefined-outer-name,unused-argument
         device_client : DeviceClient,                   # pylint: disable=redefined-outer-name
-        monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name
-        context_db_mb : Tuple[Database, MessageBroker]  # pylint: disable=redefined-outer-name
+        monitoring_client : MonitoringClient,           # pylint: disable=redefined-outer-name,unused-argument
     ):
 
     LOGGER.warning('test_listen_events begin')
 
-    context_database = context_db_mb[0]
-
-    # ----- Clean the database -----------------------------------------------------------------------------------------
-    context_database.clear_all()
-
     # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
     events_collector = EventsDeviceCollector()
     events_collector.start()
 
-    # ----- Dump state of database before create the object ------------------------------------------------------------
-    db_entries = context_database.dump()
-    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
-    for db_entry in db_entries:
-        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
-    LOGGER.info('-----------------------------------------------------------')
-    assert len(db_entries) == 0
-
     LOGGER.info('Adding Device {:s}'.format(DEVICE_DEV1_UUID))
     device_with_connect_rules = copy.deepcopy(DEVICE_DEV1)
     device_with_connect_rules['device_config']['config_rules'].extend(DEVICE_DEV1_CONNECT_RULES)
     response = device_client.AddDevice(Device(**device_with_connect_rules))
     assert response.device_uuid.uuid == DEVICE_DEV1_UUID
 
-    sleep(0.1)
+    sleep(1.0)
 
     kpi_id_list = events_collector.listen_events()
-
     assert len(kpi_id_list) > 0
+
+    device_client.DeleteDevice(response)
     events_collector.stop()
diff --git a/src/opticalcentralizedattackdetector/tests/example_objects.py b/src/opticalcentralizedattackdetector/tests/example_objects.py
index 3c5a26b6d0bde888560741f052906e0d2694c91d..09320f1c32e6b3d2ffdde2e4d061aa8c21f56ed2 100644
--- a/src/opticalcentralizedattackdetector/tests/example_objects.py
+++ b/src/opticalcentralizedattackdetector/tests/example_objects.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from copy import deepcopy
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from context.proto.context_pb2 import (
     ConfigActionEnum, DeviceDriverEnum, DeviceOperationalStatusEnum, ServiceStatusEnum, ServiceTypeEnum)
 
@@ -31,7 +31,7 @@ def endpoint(topology_id, device_id, endpoint_uuid, endpoint_type):
     return {'endpoint_id': endpoint_id(topology_id, device_id, endpoint_uuid), 'endpoint_type': endpoint_type}
 
 ## use "deepcopy" to prevent propagating forced changes during tests
-CONTEXT_ID = {'context_uuid': {'uuid': DEFAULT_CONTEXT_UUID}}
+CONTEXT_ID = {'context_uuid': {'uuid': DEFAULT_CONTEXT_NAME}}
 CONTEXT = {
     'context_id': deepcopy(CONTEXT_ID),
     'topology_ids': [],
@@ -47,7 +47,7 @@ CONTEXT_2 = {
 
 TOPOLOGY_ID = {
     'context_id': deepcopy(CONTEXT_ID),
-    'topology_uuid': {'uuid': DEFAULT_TOPOLOGY_UUID},
+    'topology_uuid': {'uuid': DEFAULT_TOPOLOGY_NAME},
 }
 TOPOLOGY = {
     'topology_id': deepcopy(TOPOLOGY_ID),
diff --git a/src/pathcomp/.gitlab-ci.yml b/src/pathcomp/.gitlab-ci.yml
index a45e735e4c07299c4853abdff5b6c39963a87a78..78791353997b85c07e154ac465d60202464345f3 100644
--- a/src/pathcomp/.gitlab-ci.yml
+++ b/src/pathcomp/.gitlab-ci.yml
@@ -48,7 +48,7 @@ build pathcomp:
       - .gitlab-ci.yml
 
 # Apply unit test to the component
-unit test pathcomp-backend:
+unit_test pathcomp-backend:
   variables:
     IMAGE_NAME: 'pathcomp' # name of the microservice
     IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
@@ -62,6 +62,7 @@ unit test pathcomp-backend:
     - if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend image is not in the system"; fi
   script:
     - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
+    - docker ps -a
     #- docker run --name ${IMAGE_NAME}-backend -d -p 8081:8081 -v "$PWD/src/${IMAGE_NAME}/backend/tests:/opt/results" --network=teraflowbridge ${IMAGE_NAME}-backend:${IMAGE_TAG}-builder
     - docker run --name ${IMAGE_NAME}-backend -d -p 8081:8081 --network=teraflowbridge ${IMAGE_NAME}-backend:${IMAGE_TAG}-builder
     - sleep 5
@@ -93,7 +94,7 @@ unit test pathcomp-backend:
   #      junit: src/$IMAGE_NAME/backend/tests/${IMAGE_NAME}-backend_report.xml
 
 # Apply unit test to the component
-unit test pathcomp-frontend:
+unit_test pathcomp-frontend:
   variables:
     IMAGE_NAME: 'pathcomp' # name of the microservice
     IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
@@ -102,22 +103,35 @@ unit test pathcomp-frontend:
     - build pathcomp
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
-    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi
     - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend image is not in the system"; fi
     - if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend image is not in the system"; fi
   script:
     - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG"
     - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
-    - docker run --name ${IMAGE_NAME}-backend -d -p 8081:8081 -v "$PWD/src/${IMAGE_NAME}/backend/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.1 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG
+    - >
+      docker run --name ${IMAGE_NAME}-backend -d -p 8081:8081 --network=teraflowbridge
+      --volume "$PWD/src/${IMAGE_NAME}/backend/tests:/opt/results"
+      $CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG
+    - PATHCOMP_BACKEND_HOST=$(docker inspect ${IMAGE_NAME}-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $PATHCOMP_BACKEND_HOST
+    - sleep 1
+    - >
+      docker run --name ${IMAGE_NAME}-frontend -d -p 10020:10020 --network=teraflowbridge
+      --volume "$PWD/src/${IMAGE_NAME}/frontend/tests:/opt/results"
+      --env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}"
+      --env "PATHCOMP_BACKEND_PORT=8081"
+      $CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG
     - sleep 1
-    - docker run --name ${IMAGE_NAME}-frontend -d -p 10020:10020 --env "PATHCOMP_BACKEND_HOST=172.28.0.1" --env "PATHCOMP_BACKEND_PORT=8081" -v "$PWD/src/${IMAGE_NAME}/frontend/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.2 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG
     - docker exec -i ${IMAGE_NAME}-frontend bash -c "env"
     - docker exec -i ${IMAGE_NAME}-backend bash -c "env"
     - sleep 5
     - docker ps -a
     - docker logs ${IMAGE_NAME}-frontend
     - docker logs ${IMAGE_NAME}-backend
-    - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/frontend/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml"
+    - >
+      docker exec -i ${IMAGE_NAME}-frontend bash -c
+      "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/frontend/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml"
     - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
@@ -147,29 +161,29 @@ unit test pathcomp-frontend:
       reports:
         junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml
 
-# Deployment of the service in Kubernetes Cluster
-deploy pathcomp:
-  variables:
-    IMAGE_NAME: 'pathcomp' # name of the microservice
-    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
-  stage: deploy
-  needs:
-    - unit test pathcomp-backend
-    - unit test pathcomp-frontend
-    # - integ_test execute
-  script:
-    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
-    - kubectl version
-    - kubectl get all
-    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
-    - kubectl get all
-  # environment:
-  #   name: test
-  #   url: https://example.com
-  #   kubernetes:
-  #     namespace: test
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-      when: manual    
-    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
-      when: manual
+## Deployment of the service in Kubernetes Cluster
+#deploy pathcomp:
+#  variables:
+#    IMAGE_NAME: 'pathcomp' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit_test pathcomp-backend
+#    - unit_test pathcomp-frontend
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
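
The unit_test pathcomp-frontend job above stops pinning container addresses with a fixed
subnet and --ip flags; it resolves the backend address at runtime with docker inspect and
hands it to the frontend via environment variables. A minimal sketch of that pattern,
assuming hypothetical names my-backend, my-frontend, and my-frontend-image on the
teraflowbridge network:

#!/bin/bash
# Resolve the IP address Docker assigned to a container on a bridge network,
# instead of hard-coding --subnet/--ip values. Container and image names here
# are hypothetical placeholders.
NETWORK="teraflowbridge"
BACKEND_HOST=$(docker inspect my-backend \
  --format "{{.NetworkSettings.Networks.${NETWORK}.IPAddress}}")
echo "my-backend is reachable at ${BACKEND_HOST}"

# Pass the resolved address to a dependent container through its environment.
docker run --name my-frontend -d --network="${NETWORK}" \
  --env "PATHCOMP_BACKEND_HOST=${BACKEND_HOST}" \
  --env "PATHCOMP_BACKEND_PORT=8081" \
  my-frontend-image:latest
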
diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
index ca4132754fc4886704cb2984519ebc21a19bfd9c..9f4cd73334e193320137277d88fdf2901ff14515 100644
--- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
+++ b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import grpc, logging, threading
-from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, INTERDOMAIN_TOPOLOGY_NAME
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 from common.proto.context_pb2 import ContextId, Empty
 from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
@@ -30,7 +30,7 @@ LOGGER = logging.getLogger(__name__)
 
 METRICS_POOL = MetricsPool('PathComp', 'RPC')
 
-ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
 
 class PathCompServiceServicerImpl(PathCompServiceServicer):
     def __init__(self) -> None:
@@ -45,8 +45,8 @@ class PathCompServiceServicerImpl(PathCompServiceServicer):
         context_client = ContextClient()
 
         if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids):
-            devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID)
-            links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID)
+            devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
+            links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME)
         else:
             # TODO: improve filtering of devices and links
             # TODO: add contexts, topologies, and membership of devices/links in topologies
diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
index a24ef769313c7d71d28e6bcc5526cbc398e05c08..5c49a1feccfd3abab6df418fd5e5e1f2f3577a2a 100644
--- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
@@ -93,7 +93,7 @@ class _Algorithm:
     def execute(self, dump_request_filename : Optional[str] = None, dump_reply_filename : Optional[str] = None) -> None:
         request = {'serviceList': self.service_list, 'deviceList': self.device_list, 'linkList': self.link_list}
 
-        self.logger.debug('[execute] request={:s}'.format(str(request)))
+        self.logger.debug('[execute] request={:s}'.format(json.dumps(request, sort_keys=True, indent=4)))
         if dump_request_filename is not None:
             with open(dump_request_filename, 'w', encoding='UTF-8') as f:
                 f.write(json.dumps(request, sort_keys=True, indent=4))
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
index 17a7e74ef573e4926d53045ab8888c71a3dd73d7..0832615a140c5d5bb5e1d0db9f7a784b0190043a 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
@@ -14,7 +14,7 @@
 
 import logging
 from typing import Dict
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.proto.context_pb2 import Constraint, Device, EndPointId, Link, Service, ServiceId, TopologyId
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from .ConstantsMappings import (
@@ -22,23 +22,27 @@ from .ConstantsMappings import (
 
 LOGGER = logging.getLogger(__name__)
 
-LOGGER = logging.getLogger(__name__)
+def compose_topology_id(topology_id : TopologyId) -> Dict: # pylint: disable=unused-argument
+    # force context_uuid and topology_uuid to always be DEFAULT_CONTEXT_NAME and DEFAULT_TOPOLOGY_NAME for simplicity
+    # in interdomain scenarios, contexts and topologies are managed in particular ways
 
-def compose_topology_id(topology_id : TopologyId) -> Dict:
-    context_uuid = topology_id.context_id.context_uuid.uuid
-    topology_uuid = topology_id.topology_uuid.uuid
+    context_uuid = DEFAULT_CONTEXT_NAME
+    #context_uuid = topology_id.context_id.context_uuid.uuid
+    #if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_NAME
 
-    if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_UUID
-    if len(topology_uuid) == 0: topology_uuid = DEFAULT_TOPOLOGY_UUID
+    topology_uuid = DEFAULT_TOPOLOGY_NAME
+    #topology_uuid = topology_id.topology_uuid.uuid
+    #if len(topology_uuid) == 0: topology_uuid = DEFAULT_TOPOLOGY_NAME
 
     return {'contextId': context_uuid, 'topology_uuid': topology_uuid}
 
 def compose_service_id(service_id : ServiceId) -> Dict:
-    # force context_uuid to be always DEFAULT_CONTEXT_UUID for simplicity
-    # for interdomain contexts are managed in a particular way
+    # force context_uuid to always be DEFAULT_CONTEXT_NAME for simplicity
+    # in interdomain scenarios, contexts are managed in particular ways
+
     #context_uuid = service_id.context_id.context_uuid.uuid
-    #if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_UUID
-    context_uuid = DEFAULT_CONTEXT_UUID
+    #if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_NAME
+    context_uuid = DEFAULT_CONTEXT_NAME
 
     service_uuid = service_id.service_uuid.uuid
     return {'contextId': context_uuid, 'service_uuid': service_uuid}
diff --git a/src/pathcomp/frontend/tests/Objects_A_B_C.py b/src/pathcomp/frontend/tests/Objects_A_B_C.py
index 510ebb6746ccb8d050d5eb6ea91ec6354f224459..2deab06f48f0d8edd7ec701713552cbec892071f 100644
--- a/src/pathcomp/frontend/tests/Objects_A_B_C.py
+++ b/src/pathcomp/frontend/tests/Objects_A_B_C.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Constraint import json_constraint_custom
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled, json_device_id
@@ -41,11 +41,11 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]):
     return service
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Domains --------------------------------------------------------------------------------------------------------
-TOPOLOGY_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
+TOPOLOGY_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME
 TOPOLOGY_ADMIN_ID   = json_topology_id(TOPOLOGY_ADMIN_UUID, context_id=CONTEXT_ID)
 TOPOLOGY_ADMIN      = json_topology(TOPOLOGY_ADMIN_UUID, context_id=CONTEXT_ID)
 
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
index 06e9bbbc715a85a2c0d979584c58b268bff687e6..33483267b82eb49f13403cf6011777dc1c160fd0 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Constraint import json_constraint_custom
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
@@ -58,12 +58,12 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]):
     return service
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Domains --------------------------------------------------------------------------------------------------------
 # Overall network topology
-TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
+TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME
 TOPO_ADMIN_ID   = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
 TOPO_ADMIN      = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
 
diff --git a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
index 99fd83ed9e1a7ca27faa6acb11b07abd573423ef..1ff3ff595c3833e747a2ddcc6af7fc5ccc56bf17 100644
--- a/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
+++ b/src/pathcomp/frontend/tests/Objects_DC_CSGW_TN_OLS.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import uuid
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Constraint import json_constraint_custom
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
@@ -68,12 +68,12 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]):
     return service
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Domains --------------------------------------------------------------------------------------------------------
 # Overall network topology
-TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
+TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME
 TOPO_ADMIN_ID   = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
 TOPO_ADMIN      = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
 
diff --git a/src/policy/.gitlab-ci.yml b/src/policy/.gitlab-ci.yml
index 164540a05172666375f503549f098ef3a90cdf06..f257c554c95dee8230c0fc756842d0e71570cad0 100644
--- a/src/policy/.gitlab-ci.yml
+++ b/src/policy/.gitlab-ci.yml
@@ -79,21 +79,21 @@ unit_test policy:
         - manifests/${IMAGE_NAME_POLICY}service.yaml
         - .gitlab-ci.yml
 
-# Deployment of policy service in Kubernetes Cluster
-deploy policy:
-  stage: deploy
-  needs:
-    - build policy
-    - unit_test policy
-  script:
-    - kubectl version
-    - kubectl get all
-    - kubectl delete --ignore-not-found=true -f "manifests/policyservice.yaml"
-    - kubectl apply -f "manifests/policyservice.yaml"
-    - kubectl delete pods --selector app=policyservice
-    - kubectl get all
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-      when: manual
-    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
-      when: manual
\ No newline at end of file
+## Deployment of policy service in Kubernetes Cluster
+#deploy policy:
+#  stage: deploy
+#  needs:
+#    - build policy
+#    - unit_test policy
+#  script:
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl delete --ignore-not-found=true -f "manifests/policyservice.yaml"
+#    - kubectl apply -f "manifests/policyservice.yaml"
+#    - kubectl delete pods --selector app=policyservice
+#    - kubectl get all
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
\ No newline at end of file
diff --git a/src/policy/src/main/resources/application.yml b/src/policy/src/main/resources/application.yml
index 3d992763422c9044136c38f21cf6d609281f86ca..fa7dff7b20defc1c2d35e537ea66fceec79a1323 100644
--- a/src/policy/src/main/resources/application.yml
+++ b/src/policy/src/main/resources/application.yml
@@ -34,9 +34,9 @@ quarkus:
     port: 8080
 
   container-image:
-    group: teraflow-h2020
+    group: tfs
     name: controller/policy
-    registry: registry.gitlab.com
+    registry: labs.etsi.org:5050
 
   kubernetes:
     name: policyservice
diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml
index 1a2b4e26c2147273256587e5580265464be69758..51ed10e4a74b7f711cf2c8ff9b0db85fdb4e7247 100644
--- a/src/policy/target/kubernetes/kubernetes.yml
+++ b/src/policy/target/kubernetes/kubernetes.yml
@@ -57,7 +57,7 @@ spec:
               value: contextservice
             - name: SERVICE_SERVICE_HOST
               value: serviceservice
-          image: registry.gitlab.com/teraflow-h2020/controller/policy:0.1.0
+          image: labs.etsi.org:5050/tfs/controller/policy:0.1.0
           imagePullPolicy: Always
           livenessProbe:
             failureThreshold: 3
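
With the container registry moved from registry.gitlab.com/teraflow-h2020 to
labs.etsi.org:5050/tfs, manual pulls of the policy image must use the new path shown in
the manifest above. A minimal sketch, assuming valid ETSI Labs registry credentials;
<user> and <token> are placeholders:

#!/bin/bash
# Authenticate against the new registry and fetch the policy image referenced
# by the manifest above. <user> and <token> are placeholders, not real values.
docker login labs.etsi.org:5050 -u "<user>" -p "<token>"
docker pull labs.etsi.org:5050/tfs/controller/policy:0.1.0
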
diff --git a/src/service/.gitlab-ci.yml b/src/service/.gitlab-ci.yml
index c40bc90cfe0b20669fe89aa9d6ecda562d7d0422..c6d4b185f00c6a6425a770079da00e5b8d77c0a1 100644
--- a/src/service/.gitlab-ci.yml
+++ b/src/service/.gitlab-ci.yml
@@ -39,7 +39,7 @@ build service:
       - .gitlab-ci.yml
 
 # Apply unit test to the component
-unit test service:
+unit_test service:
   variables:
     IMAGE_NAME: 'service' # name of the microservice
     IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
@@ -48,20 +48,138 @@ unit test service:
     - build service
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
-    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi
+
+    # Context-related
+    - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
+    - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
+    - if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi
+
+    # Device-related
+    - if docker container ls | grep context; then docker rm -f context; else echo "context image is not in the system"; fi
+    - if docker container ls | grep device; then docker rm -f device; else echo "device image is not in the system"; fi
+
+    # Pathcomp-related
+    - if docker container ls | grep pathcomp-frontend; then docker rm -f pathcomp-frontend; else echo "pathcomp-frontend image is not in the system"; fi
+    - if docker container ls | grep pathcomp-backend; then docker rm -f pathcomp-backend; else echo "pathcomp-backend image is not in the system"; fi
+
+    # Service-related
     - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
+
   script:
+    - docker pull "cockroachdb/cockroach:latest-v22.2"
+    - docker pull "nats:2.9"
+    - docker pull "$CI_REGISTRY_IMAGE/context:$IMAGE_TAG"
+    - docker pull "$CI_REGISTRY_IMAGE/device:$IMAGE_TAG"
+    - docker pull "$CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG"
+    - docker pull "$CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG"
     - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
-    - docker run --name $IMAGE_NAME -d -p 3030:3030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+
+    # Context preparation
+    - docker volume create crdb
+    - >
+      docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080
+      --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123
+      --volume "crdb:/cockroach/cockroach-data"
+      cockroachdb/cockroach:latest-v22.2 start-single-node
+    - >
+      docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222
+      nats:2.9 --http_port 8222 --user tfs --pass tfs123
+    - echo "Waiting for initialization..."
+    - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
+    - docker logs crdb
+    - while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
+    - docker logs nats
+    - docker ps -a
+    - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $CRDB_ADDRESS
+    - NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $NATS_ADDRESS
+    - >
+      docker run --name context -d -p 1010:1010
+      --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
+      --env "MB_BACKEND=nats"
+      --env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/context:$IMAGE_TAG
+    - CONTEXTSERVICE_SERVICE_HOST=$(docker inspect context --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $CONTEXTSERVICE_SERVICE_HOST
+
+    # Device preparation
+    - >
+      docker run --name device -d -p 2020:2020
+      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/device:$IMAGE_TAG
+    - DEVICESERVICE_SERVICE_HOST=$(docker inspect device --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $DEVICESERVICE_SERVICE_HOST
+
+    # PathComp preparation
+    - >
+      docker run --name pathcomp-backend -d -p 8081:8081
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG
+    - PATHCOMP_BACKEND_HOST=$(docker inspect pathcomp-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $PATHCOMP_BACKEND_HOST
+    - sleep 1
+    - >
+      docker run --name pathcomp-frontend -d -p 10020:10020
+      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
+      --env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}"
+      --env "PATHCOMP_BACKEND_PORT=8081"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG
+    - sleep 1
+    - PATHCOMPSERVICE_SERVICE_HOST=$(docker inspect pathcomp-frontend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $PATHCOMPSERVICE_SERVICE_HOST
+
+    # Service preparation
+    - >
+      docker run --name $IMAGE_NAME -d -p 3030:3030
+      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
+      --env "DEVICESERVICE_SERVICE_HOST=${DEVICESERVICE_SERVICE_HOST}"
+      --env "PATHCOMPSERVICE_SERVICE_HOST=${PATHCOMPSERVICE_SERVICE_HOST}"
+      --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+
+    # Check status before the tests
     - sleep 5
     - docker ps -a
+    - docker logs context
+    - docker logs device
+    - docker logs pathcomp-frontend
+    - docker logs pathcomp-backend
     - docker logs $IMAGE_NAME
-    - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
+
+    # Run the tests
+    - >
+      docker exec -i $IMAGE_NAME bash -c
+      "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
     - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
+    # Check status after the tests
+    - docker ps -a
+    - docker logs context
+    - docker logs device
+    - docker logs pathcomp-frontend
+    - docker logs pathcomp-backend
+    - docker logs $IMAGE_NAME
+
     - docker rm -f $IMAGE_NAME
+    - docker rm -f pathcomp-frontend
+    - docker rm -f pathcomp-backend
+    - docker rm -f device
+    - docker rm -f context
+
+    - docker rm -f crdb nats
+    - docker volume rm -f crdb
     - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
+
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
     - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
@@ -71,36 +189,36 @@ unit test service:
       - src/$IMAGE_NAME/**/*.{py,in,yml}
       - src/$IMAGE_NAME/Dockerfile
       - src/$IMAGE_NAME/tests/*.py
-      - src/$IMAGE_NAME/tests/Dockerfile
       - manifests/${IMAGE_NAME}service.yaml
       - .gitlab-ci.yml
+
   artifacts:
       when: always
       reports:
         junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
 
-# Deployment of the service in Kubernetes Cluster
-deploy service:
-  variables:
-    IMAGE_NAME: 'service' # name of the microservice
-    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
-  stage: deploy
-  needs:
-    - unit test service
-    # - integ_test execute
-  script:
-    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
-    - kubectl version
-    - kubectl get all
-    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
-    - kubectl get all
-  # environment:
-  #   name: test
-  #   url: https://example.com
-  #   kubernetes:
-  #     namespace: test
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-      when: manual    
-    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
-      when: manual
+## Deployment of the service in Kubernetes Cluster
+#deploy service:
+#  variables:
+#    IMAGE_NAME: 'service' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit_test service
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
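
The unit_test service job above waits for CockroachDB and NATS by polling container logs
until a readiness marker appears. A generalized sketch of that wait with a timeout added;
the wait_for_log helper is an assumption for illustration, not something the pipeline
defines:

#!/bin/bash
# Poll a container's logs until a marker line appears, failing after a timeout
# so a broken container cannot stall the CI job forever. The helper name
# wait_for_log is hypothetical.
wait_for_log() {
  local container="$1" pattern="$2" timeout="${3:-60}" waited=0
  while ! docker logs "${container}" 2>&1 | grep -q "${pattern}"; do
    sleep 1
    waited=$((waited + 1))
    if [ "${waited}" -ge "${timeout}" ]; then
      echo "timed out waiting for '${pattern}' in ${container} logs" >&2
      return 1
    fi
  done
}

# Same readiness markers the job greps for:
wait_for_log crdb 'finished creating default user "tfs"'
wait_for_log nats 'Server is ready'
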
diff --git a/src/service/tests/CommonObjects.py b/src/service/tests/CommonObjects.py
index 7792ad61d0e537911f593ec29d8366bb59fbb9f6..b84846ca4953ffb6ed1f93d329d562fec9df4e11 100644
--- a/src/service/tests/CommonObjects.py
+++ b/src/service/tests/CommonObjects.py
@@ -12,18 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Topology import json_topology, json_topology_id
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Topology -------------------------------------------------------------------------------------------------------
-TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
+TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
 
 # ----- Monitoring Samples ---------------------------------------------------------------------------------------------
 PACKET_PORT_SAMPLE_TYPES = [
diff --git a/src/slice/.gitlab-ci.yml b/src/slice/.gitlab-ci.yml
index 9393e6b29a2fbe180e74944375c871af4c4ae3d6..c1b2eb4874d1606a2dfec8b7618e7da66e619567 100644
--- a/src/slice/.gitlab-ci.yml
+++ b/src/slice/.gitlab-ci.yml
@@ -39,13 +39,14 @@ build slice:
       - .gitlab-ci.yml
 
 # Apply unit test to the component
-unit test slice:
+unit_test slice:
   variables:
     IMAGE_NAME: 'slice' # name of the microservice
     IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
   stage: unit_test
   needs:
     - build slice
+    - unit_test service
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
     - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
@@ -79,28 +80,28 @@ unit test slice:
       reports:
         junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
 
-# Deployment of the service in Kubernetes Cluster
-deploy slice:
-  variables:
-    IMAGE_NAME: 'slice' # name of the microservice
-    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
-  stage: deploy
-  needs:
-    - unit test slice
-    # - integ_test execute
-  script:
-    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
-    - kubectl version
-    - kubectl get all
-    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
-    - kubectl get all
-  # environment:
-  #   name: test
-  #   url: https://example.com
-  #   kubernetes:
-  #     namespace: test
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
-      when: manual    
-    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
-      when: manual
+## Deployment of the service in Kubernetes Cluster
+#deploy slice:
+#  variables:
+#    IMAGE_NAME: 'slice' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit_test slice
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py
index aa41a77ac7b5ce1ec6dabba0f841692ce2f8f42e..d693abd8f3470fe59d5664073c2f3c50f53234e2 100644
--- a/src/slice/service/SliceServiceServicerImpl.py
+++ b/src/slice/service/SliceServiceServicerImpl.py
@@ -81,7 +81,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
         service_id = ServiceId()
         # pylint: disable=no-member
         context_uuid = service_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid
-        slice_uuid = service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid
+        service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid
 
         service_client = ServiceClient()
         try:
@@ -92,10 +92,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             service_request.service_id.CopyFrom(service_id)
             service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN
             service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
-            service_reply = service_client.CreateService(service_request)
-            if service_reply != service_request.service_id: # pylint: disable=no-member
-                # pylint: disable=raise-missing-from
-                raise Exception('Service creation failed. Wrong Service Id was returned')
+            service_client.CreateService(service_request)
             _service = context_client.GetService(service_id)
         service_request = Service()
         service_request.CopyFrom(_service)
@@ -137,9 +134,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
             service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM
             LOGGER.info('assume L2')
 
-        service_reply = service_client.UpdateService(service_request)
-        if service_reply != service_request.service_id: # pylint: disable=no-member
-            raise Exception('Service update failed. Wrong Service Id was returned')
+        service_client.UpdateService(service_request)
 
         copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids)
         copy_constraints(request.slice_constraints, slice_request.slice_constraints)
diff --git a/src/tests/benchmark/automation/tests/Objects.py b/src/tests/benchmark/automation/tests/Objects.py
index 8ea6f500807e3dbcc2e34dbd559614ff91c955d8..1e8072f8fab4c2eca01b64d289866e7f5d230f0a 100644
--- a/src/tests/benchmark/automation/tests/Objects.py
+++ b/src/tests/benchmark/automation/tests/Objects.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from typing import Dict, List, Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled,
@@ -24,12 +24,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Topology -------------------------------------------------------------------------------------------------------
-TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
+TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
 
 # ----- Monitoring Samples ---------------------------------------------------------------------------------------------
 PACKET_PORT_SAMPLE_TYPES = [
diff --git a/src/tests/benchmark/policy/tests/test_functional_delete_service.py b/src/tests/benchmark/policy/tests/test_functional_delete_service.py
index 0f8d088012bed164e4603a813bfe9154eda8f568..48c2a0d5a16db038ac35c1226c33989d31a23e74 100644
--- a/src/tests/benchmark/policy/tests/test_functional_delete_service.py
+++ b/src/tests/benchmark/policy/tests/test_functional_delete_service.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging
-from common.Constants import DEFAULT_CONTEXT_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME
 from common.DeviceTypes import DeviceTypeEnum
 from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
 from common.tools.descriptor.Loader import DescriptorLoader
@@ -55,7 +55,7 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p
     assert len(response.links) == descriptor_loader.num_links
 
     l3nm_service_uuids = set()
-    response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)))
+    response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)))
     assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI)
     for service in response.services:
         service_id = service.service_id
diff --git a/src/tests/ecoc22/tests/Objects_BigNet.py b/src/tests/ecoc22/tests/Objects_BigNet.py
index 592376ff9dbaebbf4d8d02b04189e5d4f24584e3..b9e70517cd29817774320c7efb1b28d11ba00527 100644
--- a/src/tests/ecoc22/tests/Objects_BigNet.py
+++ b/src/tests/ecoc22/tests/Objects_BigNet.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
@@ -21,13 +21,13 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id
 from .Tools import compose_bearer, compose_service_endpoint_id, json_endpoint_ids, link
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 
 # ----- Topology -------------------------------------------------------------------------------------------------------
-TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
+TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
 
 
 # ----- Customer Equipment (CE) Devices --------------------------------------------------------------------------------
diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py
index 94d205a64681c7b1978524c1938cbc6b944afb58..37ceeae6a3cf8cf3eebfb191d24a3864d56805ef 100644
--- a/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py
+++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_OLS.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import os, uuid
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
@@ -68,12 +68,12 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]):
     return service
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Domains --------------------------------------------------------------------------------------------------------
 # Overall network topology
-TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
+TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME
 TOPO_ADMIN_ID   = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
 TOPO_ADMIN      = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
 
diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py
index 229e3d5fe3cee54fb7295ac0049507ec4e348a04..f29999d6c83ee00db2923261bb6bf2845e4edf38 100644
--- a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py
+++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import os
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
@@ -59,12 +59,12 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]):
     return service
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Domains --------------------------------------------------------------------------------------------------------
 # Overall network topology
-TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
+TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME
 TOPO_ADMIN_ID   = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
 TOPO_ADMIN      = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
 
diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py
index 7063265f47344555d5b99c9c9747029227a494e0..d6a0dad6d4bc9663c6728c8d7f639aff9a7b3ec6 100644
--- a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py
+++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import os, uuid
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled,
@@ -68,12 +68,12 @@ def compose_service(endpoint_a, endpoint_z, constraints=[]):
     return service
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Domains --------------------------------------------------------------------------------------------------------
 # Overall network topology
-TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
+TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_NAME
 TOPO_ADMIN_ID   = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
 TOPO_ADMIN      = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
 
diff --git a/src/tests/oeccpsc22/deploy_in_kubernetes.sh b/src/tests/oeccpsc22/deploy_in_kubernetes.sh
index 426e07e1376207065b02db3205e46dd2cbe9a39d..fffce0b768830cc68229415879ebb6795f0e4e69 100755
--- a/src/tests/oeccpsc22/deploy_in_kubernetes.sh
+++ b/src/tests/oeccpsc22/deploy_in_kubernetes.sh
@@ -22,7 +22,7 @@ export K8S_HOSTNAME="kubernetes-master"
 #export GRAFANA_PASSWORD="admin123+"
 
 # Constants
-GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
+GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
 TMP_FOLDER="./tmp"
 
 # Create a tmp folder for files modified during the deployment
diff --git a/src/tests/oeccpsc22/tests/Objects_Domain_1.py b/src/tests/oeccpsc22/tests/Objects_Domain_1.py
index 8b26348c94b827e4e418a458f21b28a863c4cb68..3f0f680dfc21cc42075b87ad0a508ac64322a7f7 100644
--- a/src/tests/oeccpsc22/tests/Objects_Domain_1.py
+++ b/src/tests/oeccpsc22/tests/Objects_Domain_1.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, json_device_id)
@@ -21,12 +21,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id
 from .Tools import get_link_uuid, json_endpoint_ids
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-D1_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-D1_CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+D1_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+D1_CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Topology -------------------------------------------------------------------------------------------------------
-D1_TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=D1_CONTEXT_ID)
-D1_TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=D1_CONTEXT_ID)
+D1_TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=D1_CONTEXT_ID)
+D1_TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=D1_CONTEXT_ID)
 
 # ----- Devices --------------------------------------------------------------------------------------------------------
 # Assume all devices have the same architecture of endpoints
diff --git a/src/tests/oeccpsc22/tests/Objects_Domain_2.py b/src/tests/oeccpsc22/tests/Objects_Domain_2.py
index f9133809243effc0a7d22c953046a4af4d6bad3e..e8a53725315c8f783f2c17471c84b62d55d51d1b 100644
--- a/src/tests/oeccpsc22/tests/Objects_Domain_2.py
+++ b/src/tests/oeccpsc22/tests/Objects_Domain_2.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, json_device_id)
@@ -21,12 +21,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id
 from .Tools import get_link_uuid, json_endpoint_ids
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-D2_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-D2_CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+D2_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+D2_CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Topology -------------------------------------------------------------------------------------------------------
-D2_TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=D2_CONTEXT_ID)
-D2_TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=D2_CONTEXT_ID)
+D2_TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=D2_CONTEXT_ID)
+D2_TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=D2_CONTEXT_ID)
 
 # ----- Devices --------------------------------------------------------------------------------------------------------
 # Assume all devices have the same architecture of endpoints
diff --git a/src/tests/ofc22/tests/ObjectsXr.py b/src/tests/ofc22/tests/ObjectsXr.py
index 0cb223de2ede509443275496ba9ca57158335036..f743e7a81af3d6b5c4052ec6746834094580e8f3 100644
--- a/src/tests/ofc22/tests/ObjectsXr.py
+++ b/src/tests/ofc22/tests/ObjectsXr.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from typing import Dict, List, Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled,
@@ -24,12 +24,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Topology -------------------------------------------------------------------------------------------------------
-TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
+TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
 
 # ----- Monitoring Samples ---------------------------------------------------------------------------------------------
 PACKET_PORT_SAMPLE_TYPES = [
diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py
index 0f8d088012bed164e4603a813bfe9154eda8f568..48c2a0d5a16db038ac35c1226c33989d31a23e74 100644
--- a/src/tests/ofc22/tests/test_functional_delete_service.py
+++ b/src/tests/ofc22/tests/test_functional_delete_service.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging
-from common.Constants import DEFAULT_CONTEXT_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME
 from common.DeviceTypes import DeviceTypeEnum
 from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
 from common.tools.descriptor.Loader import DescriptorLoader
@@ -55,7 +55,7 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p
     assert len(response.links) == descriptor_loader.num_links
 
     l3nm_service_uuids = set()
-    response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)))
+    response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)))
     assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI)
     for service in response.services:
         service_id = service.service_id
diff --git a/src/tests/p4/tests/Objects.py b/src/tests/p4/tests/Objects.py
index 0473207a87ba9ea5c74b45d983db185f8c541cbf..544fe35ee49a63d3387568054dadcd42b8974cf3 100644
--- a/src/tests/p4/tests/Objects.py
+++ b/src/tests/p4/tests/Objects.py
@@ -14,7 +14,7 @@
 
 import os
 from typing import Dict, List, Tuple
-from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
     json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled,
@@ -30,12 +30,12 @@ from common.tools.object_factory.Topology import json_topology, json_topology_id
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 
 # ----- Context --------------------------------------------------------------------------------------------------------
-CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
-CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME)
+CONTEXT    = json_context(DEFAULT_CONTEXT_NAME)
 
 # ----- Topology -------------------------------------------------------------------------------------------------------
-TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
-TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
+TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_NAME, context_id=CONTEXT_ID)
 
 # ----- Monitoring Samples ---------------------------------------------------------------------------------------------
 PACKET_PORT_SAMPLE_TYPES = [
diff --git a/src/webui/Dockerfile b/src/webui/Dockerfile
index a17d2bd9aea9c6948262dcf17776f75c0be351b8..f0ab356290dd219dd4caa9a0a06251907786323c 100644
--- a/src/webui/Dockerfile
+++ b/src/webui/Dockerfile
@@ -77,6 +77,8 @@ COPY --chown=webui:webui src/context/__init__.py context/__init__.py
 COPY --chown=webui:webui src/context/client/. context/client/
 COPY --chown=webui:webui src/device/__init__.py device/__init__.py
 COPY --chown=webui:webui src/device/client/. device/client/
+COPY --chown=webui:webui src/load_generator/__init__.py load_generator/__init__.py
+COPY --chown=webui:webui src/load_generator/client/. load_generator/client/
 COPY --chown=webui:webui src/service/__init__.py service/__init__.py
 COPY --chown=webui:webui src/service/client/. service/client/
 COPY --chown=webui:webui src/slice/__init__.py slice/__init__.py
diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py
index d60cca6597ced52db8e320f3ba1beb2b032be65b..94bc91429ad43bfaf140fa6e286500f41713a4cd 100644
--- a/src/webui/service/__init__.py
+++ b/src/webui/service/__init__.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import json
+from typing import List, Tuple, Union
 from flask import Flask, request, session
 from flask_healthz import healthz, HealthError
 from context.client.ContextClient import ContextClient
@@ -36,10 +37,20 @@ def readiness():
         device_client.connect()
         device_client.close()
     except Exception as e:
-        raise HealthError('Can\'t connect with the service: ' + e.details())
+        raise HealthError("Can't connect with the service: {:s}".format(str(e))) from e
 
-def from_json(json_str):
-    return json.loads(json_str)
+def json_to_list(json_str : str) -> List[Tuple[str, Union[str, Tuple[str, str]]]]:
+    try:
+        data = json.loads(json_str)
+    except: # pylint: disable=bare-except
+        return [('item', str(json_str))]
+
+    if isinstance(data, dict):
+        return [('kv', (key, value)) for key, value in data.items()]
+    elif isinstance(data, list):
+        return [('item', ', '.join(str(item) for item in data))]
+    else:
+        return [('item', str(data))]
 
 class SetSubAppMiddleware():
     def __init__(self, app, web_app_root):
@@ -63,29 +74,32 @@ def create_app(use_config=None, web_app_root=None):
     
     app.register_blueprint(healthz, url_prefix='/healthz')
 
-    from webui.service.js.routes import js
+    from webui.service.js.routes import js                  # pylint: disable=import-outside-toplevel
     app.register_blueprint(js)
 
-    from webui.service.main.routes import main
+    from webui.service.main.routes import main              # pylint: disable=import-outside-toplevel
     app.register_blueprint(main)
 
-    from webui.service.service.routes import service
+    from webui.service.load_gen.routes import load_gen      # pylint: disable=import-outside-toplevel
+    app.register_blueprint(load_gen)
+
+    from webui.service.service.routes import service        # pylint: disable=import-outside-toplevel
     app.register_blueprint(service)
 
-    from webui.service.slice.routes import slice
+    from webui.service.slice.routes import slice            # pylint: disable=import-outside-toplevel,redefined-builtin
     app.register_blueprint(slice)
 
-    from webui.service.device.routes import device
+    from webui.service.device.routes import device          # pylint: disable=import-outside-toplevel
     app.register_blueprint(device)
 
-    from webui.service.link.routes import link
+    from webui.service.link.routes import link              # pylint: disable=import-outside-toplevel
     app.register_blueprint(link)
-    
 
-    app.jinja_env.filters['from_json'] = from_json
-    
-    app.jinja_env.globals.update(get_working_context=get_working_context)
-    app.jinja_env.globals.update(get_working_topology=get_working_topology)
+    app.jinja_env.globals.update({              # pylint: disable=no-member
+        'json_to_list'        : json_to_list,
+        'get_working_context' : get_working_context,
+        'get_working_topology': get_working_topology,
+    })
 
     if web_app_root is not None:
         app.wsgi_app = SetSubAppMiddleware(app.wsgi_app, web_app_root)
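For reference, the json_to_list global registered above yields (item_type, item) pairs that the device/service/slice detail templates iterate over. A behaviour sketch; the outputs follow directly from the implementation:

    from webui.service import json_to_list

    assert json_to_list('{"mtu": 1500}')   == [('kv', ('mtu', 1500))]
    assert json_to_list('["a", "b", "c"]') == [('item', 'a, b, c')]
    assert json_to_list('not-json')        == [('item', 'not-json')]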
diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py
index fe475594b4f0a1a8674ccbbe37c704df02f43621..140efe4b053d908c99e34551ddf6ba59d03b8e77 100644
--- a/src/webui/service/device/routes.py
+++ b/src/webui/service/device/routes.py
@@ -33,7 +33,7 @@ device_client = DeviceClient()
 
 @device.get('/')
 def home():
-    if 'context_topology_uuid' not in session:
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
 
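The same two-key session guard also appears in the link and main routes below. A hypothetical shared helper (not part of this change set) could centralize the check:

    from typing import Optional, Tuple
    from flask import session

    def get_working_context_topology() -> Optional[Tuple[str, str]]:
        # Both keys are written together by main.home(), so either both
        # exist or neither does; checking each guards against stale sessions.
        if 'context_uuid' not in session or 'topology_uuid' not in session:
            return None
        return session['context_uuid'], session['topology_uuid']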
diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py
index 5b8831b7732443830a6f9b1ef8f7da92b4c41cc0..0bfe2b9026050b3de6d6f0a1ee3674169c53913a 100644
--- a/src/webui/service/link/routes.py
+++ b/src/webui/service/link/routes.py
@@ -25,7 +25,7 @@ context_client = ContextClient()
 
 @link.get('/')
 def home():
-    if 'context_topology_uuid' not in session:
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
         flash("Please select a context!", "warning")
         return redirect(url_for("main.home"))
 
diff --git a/src/webui/service/load_gen/__init__.py b/src/webui/service/load_gen/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/webui/service/load_gen/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/webui/service/load_gen/routes.py b/src/webui/service/load_gen/routes.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc091f3b4a5ea732d89599154659d9cbda20629b
--- /dev/null
+++ b/src/webui/service/load_gen/routes.py
@@ -0,0 +1,47 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from flask import render_template, Blueprint, flash
+from common.proto.context_pb2 import Empty
+from load_generator.client.LoadGeneratorClient import LoadGeneratorClient
+
+load_gen = Blueprint('load_gen', __name__, url_prefix='/load_gen')
+
+@load_gen.route('/start', methods=['GET'])
+def start():
+    load_gen_client = LoadGeneratorClient()
+    try:
+        load_gen_client.connect()
+        load_gen_client.Start(Empty())
+        flash('Load Generator Started.', 'success')
+    except Exception as e: # pylint: disable=broad-except
+        flash('Problem starting Load Generator. {:s}'.format(str(e)), 'danger')
+    finally:
+        load_gen_client.close()
+
+    return render_template('main/debug.html')
+
+@load_gen.route('/stop', methods=['GET'])
+def stop():
+    load_gen_client = LoadGeneratorClient()
+    try:
+        load_gen_client.connect()
+        load_gen_client.Stop(Empty())
+        flash('Load Generator Stopped.', 'success')
+    except Exception as e: # pylint: disable=broad-except
+        flash('Problem stopping Load Generator. {:s}'.format(str(e)), 'danger')
+    finally:
+        load_gen_client.close()
+
+    return render_template('main/debug.html')
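A hypothetical smoke check for the two endpoints above, assuming create_app() applies use_config to app.config (a SECRET_KEY is needed because the handlers call flash()). Since the handlers swallow gRPC errors, this exercises routing only and does not require a live Load Generator service:

    from webui.service import create_app

    app = create_app(use_config={'SECRET_KEY': 'test', 'TESTING': True})
    client = app.test_client()
    assert client.get('/load_gen/start').status_code == 200
    assert client.get('/load_gen/stop').status_code == 200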
diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py
index 0e008734730867bca741d748c49e3b0589b40e48..30ed5291103f89f7bd1fbd680f045d4213451abb 100644
--- a/src/webui/service/main/routes.py
+++ b/src/webui/service/main/routes.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging, re
+import base64, json, logging, re
 from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
-from common.proto.context_pb2 import Empty, ContextIdList, TopologyId, TopologyIdList
+from common.proto.context_pb2 import ContextList, Empty, TopologyId, TopologyList
 from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
@@ -32,7 +32,7 @@ device_client = DeviceClient()
 service_client = ServiceClient()
 slice_client = SliceClient()
 
-logger = logging.getLogger(__name__)
+LOGGER = logging.getLogger(__name__)
 
 def process_descriptors(descriptors):
     try:
@@ -46,6 +46,7 @@ def process_descriptors(descriptors):
     descriptor_loader = DescriptorLoader(descriptors)
     results = descriptor_loader.process()
     for message,level in compose_notifications(results):
+        if level == 'error': LOGGER.error('message={:s}'.format(str(message)))
         flash(message, level)
 
 @main.route('/', methods=['GET', 'POST'])
@@ -55,28 +56,45 @@ def home():
     context_topology_form: ContextTopologyForm = ContextTopologyForm()
     context_topology_form.context_topology.choices.append(('', 'Select...'))
 
-    ctx_response: ContextIdList = context_client.ListContextIds(Empty())
-    for context_id in ctx_response.context_ids:
-        context_uuid = context_id.context_uuid.uuid
-        topo_response: TopologyIdList = context_client.ListTopologyIds(context_id)
-        for topology_id in topo_response.topology_ids:
-            topology_uuid = topology_id.topology_uuid.uuid
-            context_topology_uuid  = 'ctx[{:s}]/topo[{:s}]'.format(context_uuid, topology_uuid)
-            context_topology_name  = 'Context({:s}):Topology({:s})'.format(context_uuid, topology_uuid)
+    contexts : ContextList = context_client.ListContexts(Empty())
+    for context_ in contexts.contexts:
+        #context_uuid : str = context_.context_id.context_uuid.uuid
+        context_name : str = context_.name
+        topologies : TopologyList = context_client.ListTopologies(context_.context_id)
+        for topology_ in topologies.topologies:
+            #topology_uuid : str = topology_.topology_id.topology_uuid.uuid
+            topology_name : str = topology_.name
+            raw_values = context_name, topology_name
+            b64_values = [base64.b64encode(v.encode('utf-8')).decode('utf-8') for v in raw_values]
+            context_topology_uuid = ','.join(b64_values)
+            context_topology_name  = 'Context({:s}):Topology({:s})'.format(context_name, topology_name)
             context_topology_entry = (context_topology_uuid, context_topology_name)
             context_topology_form.context_topology.choices.append(context_topology_entry)
 
     if context_topology_form.validate_on_submit():
         context_topology_uuid = context_topology_form.context_topology.data
         if len(context_topology_uuid) > 0:
-            match = re.match('ctx\[([^\]]+)\]\/topo\[([^\]]+)\]', context_topology_uuid)
-            if match is not None:
-                session['context_topology_uuid'] = context_topology_uuid = match.group(0)
-                session['context_uuid'] = context_uuid = match.group(1)
-                session['topology_uuid'] = topology_uuid = match.group(2)
-                MSG = f'Context({context_uuid})/Topology({topology_uuid}) successfully selected.'
-                flash(MSG, 'success')
-                return redirect(url_for("main.home"))
+            b64_values = context_topology_uuid.split(',')
+            raw_values = [base64.b64decode(v.encode('utf-8')).decode('utf-8') for v in b64_values]
+            context_name, topology_name = raw_values
+            #session.clear()
+            session['context_topology_uuid'] = context_topology_uuid
+            session['context_uuid'] = context_name
+            #session['context_name'] = context_name
+            session['topology_uuid'] = topology_name
+            #session['topology_name'] = topology_name
+            MSG = f'Context({context_name})/Topology({topology_name}) successfully selected.'
+            flash(MSG, 'success')
+            return redirect(url_for('main.home'))
+
+            #match = re.match('ctx\[([^\]]+)\]\/topo\[([^\]]+)\]', context_topology_uuid)
+            #if match is not None:
+            #    session['context_topology_uuid'] = context_topology_uuid = match.group(0)
+            #    session['context_uuid'] = context_uuid = match.group(1)
+            #    session['topology_uuid'] = topology_uuid = match.group(2)
+            #    MSG = f'Context({context_uuid})/Topology({topology_uuid}) successfully selected.'
+            #    flash(MSG, 'success')
+            #    return redirect(url_for('main.home'))
 
     if 'context_topology_uuid' in session:
         context_topology_form.context_topology.data = session['context_topology_uuid']
@@ -87,7 +105,7 @@ def home():
             process_descriptors(descriptor_form.descriptors)
             return redirect(url_for("main.home"))
     except Exception as e: # pylint: disable=broad-except
-        logger.exception('Descriptor load failed')
+        LOGGER.exception('Descriptor load failed')
         flash(f'Descriptor load failed: `{str(e)}`', 'danger')
     finally:
         context_client.close()
@@ -100,7 +118,7 @@ def home():
 def topology():
     context_client.connect()
     try:
-        if 'context_topology_uuid' not in session:
+        if 'context_uuid' not in session or 'topology_uuid' not in session:
             return jsonify({'devices': [], 'links': []})
 
         context_uuid = session['context_uuid']
@@ -118,7 +136,7 @@ def topology():
             if device.device_id.device_uuid.uuid not in topo_device_uuids: continue
             devices.append({
                 'id': device.device_id.device_uuid.uuid,
-                'name': device.device_id.device_uuid.uuid,
+                'name': device.name,
                 'type': device.device_type,
             })
 
@@ -128,17 +146,19 @@ def topology():
             if link.link_id.link_uuid.uuid not in topo_link_uuids: continue
             if len(link.link_endpoint_ids) != 2:
                 str_link = grpc_message_to_json_string(link)
-                logger.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link))
+                LOGGER.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link))
                 continue
             links.append({
                 'id': link.link_id.link_uuid.uuid,
+                'name': link.name,
                 'source': link.link_endpoint_ids[0].device_id.device_uuid.uuid,
                 'target': link.link_endpoint_ids[1].device_id.device_uuid.uuid,
             })
 
         return jsonify({'devices': devices, 'links': links})
-    except:
-        logger.exception('Error retrieving topology')
+    except: # pylint: disable=bare-except
+        LOGGER.exception('Error retrieving topology')
+        return jsonify({'devices': [], 'links': []})
     finally:
         context_client.close()
 
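The dropdown value built in home() base64-encodes each name before joining with ','; the base64 alphabet (A-Z, a-z, 0-9, +, /, =) never contains a comma, so the separator stays unambiguous for arbitrary context/topology names, unlike the old ctx[...]/topo[...] scheme, which a name containing ']' would break. A round-trip sketch (helper names are illustrative, not from the source):

    import base64

    def encode_value(context_name : str, topology_name : str) -> str:
        raw_values = (context_name, topology_name)
        return ','.join(base64.b64encode(v.encode('utf-8')).decode('utf-8') for v in raw_values)

    def decode_value(value : str) -> tuple:
        return tuple(base64.b64decode(v.encode('utf-8')).decode('utf-8') for v in value.split(','))

    assert decode_value(encode_value('admin', 'admin')) == ('admin', 'admin')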
diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html
index e49396c4f7971cb19fff5780d3830082f9422a5b..f21cbbcf02903ca5faffb355f56fe9c0e10de9ee 100644
--- a/src/webui/service/templates/device/detail.html
+++ b/src/webui/service/templates/device/detail.html
@@ -17,7 +17,7 @@
 {% extends 'base.html' %}
 
 {% block content %}
-<h1>Device {{ device.device_id.device_uuid.uuid }}</h1>
+<h1>Device {{ device.name }} ({{ device.device_id.device_uuid.uuid }})</h1>
 
 <div class="row mb-3">
     <div class="col-sm-3">
@@ -43,8 +43,9 @@
 <br>
 <div class="row mb-3">
     <div class="col-sm-4">
-        <b>UUID: </b>{{ device.device_id.device_uuid.uuid }}<br><br>
-        <b>Type: </b>{{ device.device_type }}<br><br>
+        <b>UUID: </b>{{ device.device_id.device_uuid.uuid }}<br>
+        <b>Name: </b>{{ device.name }}<br>
+        <b>Type: </b>{{ device.device_type }}<br>
         <b>Status: </b> {{ dose.Name(device.device_operational_status).replace('DEVICEOPERATIONALSTATUS_', '') }}<br>
         <b>Drivers: </b>
         <ul>
@@ -57,7 +58,8 @@
         <table class="table table-striped table-hover">
             <thead>
                 <tr>
-                    <th scope="col">Endpoints</th>
+                    <th scope="col">Endpoint UUID</th>
+                    <th scope="col">Name</th>
                     <th scope="col">Type</th>
                 </tr>
             </thead>
@@ -67,6 +69,9 @@
                     <td>
                         {{ endpoint.endpoint_id.endpoint_uuid.uuid }}
                     </td>
+                    <td>
+                        {{ endpoint.name }}
+                    </td>
                     <td>
                         {{ endpoint.endpoint_type }}
                     </td>
@@ -98,8 +103,12 @@
             </td>
             <td>
                 <ul>
-                    {% for key, value in (config.custom.resource_value | from_json).items() %}
-                    <li><b>{{ key }}:</b> {{ value }}</li>
+                    {% for item_type, item in json_to_list(config.custom.resource_value) %}
+                        {% if item_type == 'kv' %}
+                            <li><b>{{ item[0] }}:</b> {{ item[1] }}</li>
+                        {% else %}
+                            <li>{{ item }}</li>
+                        {% endif %}
                     {% endfor %}
                 </ul>
             </td>
diff --git a/src/webui/service/templates/device/home.html b/src/webui/service/templates/device/home.html
index 2c108add96df7de413f5310d4bd9e3c3fb69a6ed..7b4437ccecd0c4d4948edba862666ee83c01e4cf 100644
--- a/src/webui/service/templates/device/home.html
+++ b/src/webui/service/templates/device/home.html
@@ -42,7 +42,8 @@
     <table class="table table-striped table-hover">
         <thead>
           <tr>
-            <th scope="col">#</th>
+            <th scope="col">UUID</th>
+            <th scope="col">Name</th>
             <th scope="col">Type</th>
             <th scope="col">Endpoints</th>
             <th scope="col">Drivers</th>
@@ -56,9 +57,10 @@
                 {% for device in devices %}
                 <tr>
                     <td>
-                        <!-- <a href="{{ url_for('device.detail', device_uuid=device.device_id.device_uuid.uuid) }}"> -->
-                            {{ device.device_id.device_uuid.uuid }}
-                        <!-- </a> -->
+                        {{ device.device_id.device_uuid.uuid }}
+                    </td>
+                    <td>
+                        {{ device.name }}
                     </td>
                     <td>
                         {{ device.device_type }}
diff --git a/src/webui/service/templates/js/topology.js b/src/webui/service/templates/js/topology.js
index 29156224da2245cd1db75c4384c66b6643130f4c..adcabf62cd4cf59bb11fda3584a1e367836e45e1 100644
--- a/src/webui/service/templates/js/topology.js
+++ b/src/webui/service/templates/js/topology.js
@@ -88,9 +88,9 @@ d3.json("{{ url_for('main.topology') }}", function(data) {
         .call(d3.drag().on("start", dragstarted).on("drag", dragged).on("end", dragended));
 
     // node tooltip
-    node.append("title").text(function(d) { return d.id; });
+    node.append("title").text(function(n) { return n.name + ' (' + n.id + ')'; });
     // link tooltip
-    link.append("title").text(function(d) { return d.id; });
+    link.append("title").text(function(l) { return l.name + ' (' + l.id + ')'; });
 
     // link style
     link
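The tooltips above now read both name and id from each node/link, matching the payload produced by main.topology(). An illustrative sample (values made up):

    sample_topology = {
        'devices': [
            {'id': 'uuid-r1', 'name': 'R1', 'type': 'emu-packet-router'},
        ],
        'links': [
            {'id': 'uuid-l1', 'name': 'R1==R2', 'source': 'uuid-r1', 'target': 'uuid-r2'},
        ],
    }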
diff --git a/src/webui/service/templates/link/detail.html b/src/webui/service/templates/link/detail.html
index 7df9ddce6bdddd511f3b50313cafa1374990b99e..16ec5470cfa428905091004affe28b8876d9c68d 100644
--- a/src/webui/service/templates/link/detail.html
+++ b/src/webui/service/templates/link/detail.html
@@ -16,7 +16,7 @@
    {% extends 'base.html' %}
    
    {% block content %}
-    <h1>Link {{ link.link_id.link_uuid.uuid }}</h1>
+    <h1>Link {{ link.name }} ({{ link.link_id.link_uuid.uuid }})</h1>
     <div class="row mb-3">
           <div class="col-sm-3">
                <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('link.home') }}'">
@@ -29,13 +29,14 @@
      <br>
        <div class="row mb-3">
             <div class="col-sm-4">
-                <b>UUID: </b>{{ link.link_id.link_uuid.uuid }}<br><br>
-            </div>
+                <b>UUID: </b>{{ link.link_id.link_uuid.uuid }}<br>
+                <b>Name: </b>{{ link.name }}<br>
+            </div>
             <div class="col-sm-8">
                     <table class="table table-striped table-hover">
                         <thead>
                             <tr>
-                                <th scope="col">Endpoints</th>
+                                <th scope="col">Endpoint UUID</th>
                                 <th scope="col">Device</th>
                             </tr>
                         </thead>
diff --git a/src/webui/service/templates/link/home.html b/src/webui/service/templates/link/home.html
index 77d00d34185ac45ada0ed6d8e9915c0b2f3ad9c0..16fe36e1f4a70ce76ff32257a508acc841248605 100644
--- a/src/webui/service/templates/link/home.html
+++ b/src/webui/service/templates/link/home.html
@@ -27,7 +27,7 @@
                </a> -->
            </div>
            <div class="col">
-               {{ links | length }} links found</i>
+               {{ links | length }} links found in context <i>{{ session['context_uuid'] }}</i>
            </div>
            <!-- <div class="col">
                <form>
@@ -42,7 +42,8 @@
        <table class="table table-striped table-hover">
            <thead>
              <tr>
-               <th scope="col">#</th>
+               <th scope="col">UUID</th>
+               <th scope="col">Name</th>
                <th scope="col">Endpoints</th>
                <th scope="col"></th>
              </tr>
@@ -52,11 +53,12 @@
                    {% for link in links %}
                    <tr>
                        <td>
-                           <!-- <a href="#"> -->
-                               {{ link.link_id.link_uuid.uuid }}
-                           <!-- </a> -->
+                            {{ link.link_id.link_uuid.uuid }}
                        </td>
-   
+                       <td>
+                           {{ link.name }}
+                       </td>
+
                        <td>
                            <ul>
                                {% for end_point in link.link_endpoint_ids %}
diff --git a/src/webui/service/templates/main/debug.html b/src/webui/service/templates/main/debug.html
index d065cc49d7262940beedd5eb9aa44a2ab890a07e..1ab3be251a903c8671be8aced7e679404c19a3f6 100644
--- a/src/webui/service/templates/main/debug.html
+++ b/src/webui/service/templates/main/debug.html
@@ -19,18 +19,24 @@
 {% block content %}
     <h1>Debug</h1>
 
-    <h3>Dump ContextDB:</h3>
-    <ul>
-        <li>
-            <a class="nav-link" href="/context/api/dump/html" id="context_html_link" target="context_html">
-                as HTML
-            </a>
-        </li>
-        <li>
-            <a class="nav-link" href="/context/api/dump/text" id="context_text_link" target="context_text">
-                as Text
-            </a>
-        </li>
-    </ul>
+    <!--
+        <h3>Dump ContextDB:</h3>
+        <ul>
+            <li>
+                <a class="nav-link" href="/context/api/dump/html" id="context_html_link" target="context_html">
+                    as HTML
+                </a>
+            </li>
+            <li>
+                <a class="nav-link" href="/context/api/dump/text" id="context_text_link" target="context_text">
+                    as Text
+                </a>
+            </li>
+        </ul>
+    -->
+
+    <h3>Load Generator:</h3>
+    <a href="{{ url_for('load_gen.start') }}" class="btn btn-primary" style="margin-bottom: 10px;">Start</a>
+    <a href="{{ url_for('load_gen.stop') }}" class="btn btn-primary" style="margin-bottom: 10px;">Stop</a>
 
 {% endblock %}
diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html
index e1f963e425e23216281068b82da23c809a677296..67b240b3d0e79745b839c0103c40f4721e4befad 100644
--- a/src/webui/service/templates/service/detail.html
+++ b/src/webui/service/templates/service/detail.html
@@ -43,16 +43,16 @@
 
 <div class="row mb-3">
     <div class="col-sm-4">
-        <b>Context: </b> {{ service.service_id.context_id.context_uuid.uuid }}<br><br>
-        <b>UUID: </b> {{ service.service_id.service_uuid.uuid }}<br><br>
-        <b>Type: </b> {{ ste.Name(service.service_type).replace('SERVICETYPE_', '') }}<br><br>
-        <b>Status: </b> {{ sse.Name(service.service_status.service_status).replace('SERVICESTATUS_', '') }}<br><br>
+        <b>Context: </b> {{ service.service_id.context_id.context_uuid.uuid }}<br>
+        <b>UUID: </b> {{ service.service_id.service_uuid.uuid }}<br>
+        <b>Type: </b> {{ ste.Name(service.service_type).replace('SERVICETYPE_', '') }}<br>
+        <b>Status: </b> {{ sse.Name(service.service_status.service_status).replace('SERVICESTATUS_', '') }}<br>
     </div>
     <div class="col-sm-8">
         <table class="table table-striped table-hover">
             <thead>
                 <tr>
-                    <th scope="col">Endpoints</th>
+                    <th scope="col">Endpoint UUID</th>
                     <th scope="col">Device</th>
                 </tr>
             </thead>
@@ -159,8 +159,12 @@
             </td>
             <td>
                 <ul>
-                    {% for key, value in (config.custom.resource_value | from_json).items() %}
-                    <li><b>{{ key }}:</b> {{ value }}</li>
+                    {% for item_type, item in json_to_list(config.custom.resource_value) %}
+                        {% if item_type == 'kv' %}
+                            <li><b>{{ item[0] }}:</b> {{ item[1] }}</li>
+                        {% else %}
+                            <li>{{ item }}</li>
+                        {% endif %}
                     {% endfor %}
                 </ul>
             </td>
diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html
index 889e10ce53b4a019b55f714c2442f32f0c2b8e93..404dede394fbbc4b30d181c1872c989686c4b17a 100644
--- a/src/webui/service/templates/slice/detail.html
+++ b/src/webui/service/templates/slice/detail.html
@@ -44,16 +44,16 @@
 
 <div class="row mb-3">
     <div class="col-sm-4">
-        <b>Context: </b> {{ slice.slice_id.context_id.context_uuid.uuid }}<br><br>
-        <b>UUID: </b> {{ slice.slice_id.slice_uuid.uuid }}<br><br>
-        <b>Owner: </b> {{ slice.slice_owner.owner_uuid.uuid }}<br><br>
-        <b>Status: </b> {{ sse.Name(slice.slice_status.slice_status).replace('SLICESTATUS_', '') }}<br><br>
+        <b>Context: </b> {{ slice.slice_id.context_id.context_uuid.uuid }}<br>
+        <b>UUID: </b> {{ slice.slice_id.slice_uuid.uuid }}<br>
+        <b>Owner: </b> {{ slice.slice_owner.owner_uuid.uuid }}<br>
+        <b>Status: </b> {{ sse.Name(slice.slice_status.slice_status).replace('SLICESTATUS_', '') }}<br>
     </div>
     <div class="col-sm-8">
         <table class="table table-striped table-hover">
             <thead>
                 <tr>
-                    <th scope="col">Endpoints</th>
+                    <th scope="col">Endpoint UUID</th>
                     <th scope="col">Device</th>
                 </tr>
             </thead>
@@ -160,8 +160,12 @@
             </td>
             <td>
                 <ul>
-                    {% for key, value in (config.custom.resource_value | from_json).items() %}
-                    <li><b>{{ key }}:</b> {{ value }}</li>
+                    {% for item_type, item in json_to_list(config.custom.resource_value) %}
+                        {% if item_type == 'kv' %}
+                            <li><b>{{ item[0] }}:</b> {{ item[1] }}</li>
+                        {% else %}
+                            <li>{{ item }}</li>
+                        {% endif %}
                     {% endfor %}
                 </ul>
             </td>