diff --git a/.gitignore b/.gitignore
index 5dc4372a5956ba5ce2b5ef6fc4359616c7cb5cd5..7e3b0cd6a26b755aeac4422f530c331d25a0cc43 100644
--- a/.gitignore
+++ b/.gitignore
@@ -162,6 +162,7 @@ cython_debug/
 
 # TeraFlowSDN-generated files
 tfs_runtime_env_vars.sh
+tfs_bchain_runtime_env_vars.sh
 delete_local_deployment.sh
 local_docker_deployment.sh
 local_k8s_deployment.sh
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 3de792462d28b2d42e71b0329aefce2c2928984e..dac76342a9fdb48247cc171cfdf37fd6b60600ba 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -42,3 +42,4 @@ include:
   #- local: '/src/slice/.gitlab-ci.yml'
   #- local: '/src/interdomain/.gitlab-ci.yml'
   - local: '/src/pathcomp/.gitlab-ci.yml'
+  #- local: '/src/dlt/.gitlab-ci.yml'
diff --git a/deploy.sh b/deploy.sh
index 71441fe5dd39b13658e1c7712115118b412d64cb..add41fa139a0127cb26d652f5b47decfe8658ad0 100755
--- a/deploy.sh
+++ b/deploy.sh
@@ -66,44 +66,71 @@ echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
 
 for COMPONENT in $TFS_COMPONENTS; do
     echo "Processing '$COMPONENT' component..."
-    IMAGE_NAME="$COMPONENT:$TFS_IMAGE_TAG"
-    IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
 
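+    # Image names and registry URLs are now computed per image, since some components (e.g., pathcomp, dlt) build more than one image.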
     echo "  Building Docker image..."
     BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
 
     if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
-        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
+        docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
     elif [ "$COMPONENT" == "pathcomp" ]; then
         BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
-        docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . >> "$BUILD_LOG"
+        docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
 
         BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
-        docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+        docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
         # next command is redundant, but helpful to keep cache updated between rebuilds
-        docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG-builder" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+        IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
+        docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+    elif [ "$COMPONENT" == "dlt" ]; then
+        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
+        docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
+
+        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log"
+        docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
     else
-        docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
+        docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
     fi
 
     if [ -n "$TFS_REGISTRY_IMAGE" ]; then
         echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGE'..."
 
         if [ "$COMPONENT" == "pathcomp" ]; then
-            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
-            docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL-frontend" > "$TAG_LOG"
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
-            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log"
-            docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL-backend" > "$TAG_LOG"
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
+            docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
             PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log"
-            docker push "$IMAGE_URL-frontend" > "$PUSH_LOG"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log"
+            docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
             PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log"
-            docker push "$IMAGE_URL-backend" > "$PUSH_LOG"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+        elif [ "$COMPONENT" == "dlt" ]; then
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log"
+            docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log"
+            docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
         else
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
             TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
-            docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG"
+            docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
 
             PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
             docker push "$IMAGE_URL" > "$PUSH_LOG"
@@ -117,33 +144,48 @@ for COMPONENT in $TFS_COMPONENTS; do
     if [ -n "$TFS_REGISTRY_IMAGE" ]; then
         # Registry is set
         if [ "$COMPONENT" == "pathcomp" ]; then
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
             VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL-frontend#g" "$MANIFEST"
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
 
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
             VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL-backend#g" "$MANIFEST"
-
-            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+        elif [ "$COMPONENT" == "dlt" ]; then
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f3)
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f3)
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
         else
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
             VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
             sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
-            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
         fi
+
+        sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
     else
         # Registry is not set
         if [ "$COMPONENT" == "pathcomp" ]; then
             VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_NAME-frontend#g" "$MANIFEST"
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $COMPONENT-frontend:$TFS_IMAGE_TAG#g" "$MANIFEST"
 
             VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_NAME-backend#g" "$MANIFEST"
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $COMPONENT-backend:$TFS_IMAGE_TAG#g" "$MANIFEST"
+        elif [ "$COMPONENT" == "dlt" ]; then
+            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f3)
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $COMPONENT-connector:$TFS_IMAGE_TAG#g" "$MANIFEST"
 
-            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
+            VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f3)
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $COMPONENT-gateway:$TFS_IMAGE_TAG#g" "$MANIFEST"
         else
             VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
-            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
-            sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
+            sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $COMPONENT:$TFS_IMAGE_TAG#g" "$MANIFEST"
         fi
+
+        sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
     fi
 
     # TODO: harmonize names of the monitoring component
@@ -157,7 +199,7 @@ for COMPONENT in $TFS_COMPONENTS; do
 
     echo "  Collecting env-vars for '$COMPONENT' component..."
 
-    SERVICE_DATA=$(kubectl get service ${COMPONENT}service --namespace $TFS_K8S_NAMESPACE -o json)
+    SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME}service --namespace $TFS_K8S_NAMESPACE -o json)
     if [ -z "${SERVICE_DATA}" ]; then continue; fi
 
     # Env vars for service's host address
@@ -189,6 +231,7 @@ for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do
     kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST
     printf "\n"
 done
+printf "\n"
 
 # By now, leave these controls here. Some component dependencies are not well handled.
 
@@ -203,8 +246,9 @@ fi
 
 for COMPONENT in $TFS_COMPONENTS; do
     echo "Waiting for '$COMPONENT' component..."
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
     kubectl wait --namespace $TFS_K8S_NAMESPACE \
-        --for='condition=available' --timeout=10s deployment/${COMPONENT}service
+        --for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service
     printf "\n"
 done
 
diff --git a/deploy_mock_blockchain.sh b/deploy_mock_blockchain.sh
new file mode 100755
index 0000000000000000000000000000000000000000..066820fc0f9a1005823dd124798e4de122f206f8
--- /dev/null
+++ b/deploy_mock_blockchain.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# Set the URL of your local Docker registry where the images will be uploaded to.
+REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the tag you want to use for your images.
+IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+K8S_NAMESPACE="tfs-bchain"
+
+COMPONENT="mock_blockchain"
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+GITLAB_REPO_URL="registry.gitlab.com/teraflow-h2020/controller"
+TMP_FOLDER="./tmp"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+mkdir -p $TMP_MANIFESTS_FOLDER
+TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+mkdir -p $TMP_LOGS_FOLDER
+
+echo "Deleting and Creating a new namespace..."
+kubectl delete namespace $K8S_NAMESPACE
+kubectl create namespace $K8S_NAMESPACE
+printf "\n"
+
+echo "Deploying components and collecting environment variables..."
+ENV_VARS_SCRIPT=tfs_bchain_runtime_env_vars.sh
+echo "# Environment variables for TeraFlow Mock-Blockchain deployment" > $ENV_VARS_SCRIPT
+PYTHONPATH=$(pwd)/src
+
+echo "Processing '$COMPONENT' component..."
+IMAGE_NAME="$COMPONENT:$IMAGE_TAG"
+IMAGE_URL=$(echo "$REGISTRY_IMAGE/$IMAGE_NAME" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+echo "  Building Docker image..."
+BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
+docker build -t "$IMAGE_NAME" -f ./src/dlt/mock_blockchain/Dockerfile . > "$BUILD_LOG"
+
+if [ -n "$REGISTRY_IMAGE" ]; then
+    echo "  Pushing Docker image to '$REGISTRY_IMAGE'..."
+
+    TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
+    docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG"
+
+    PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
+    docker push "$IMAGE_URL" > "$PUSH_LOG"
+fi
+
+echo "  Adapting '$COMPONENT' manifest file..."
+MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}.yaml"
+cp ./manifests/"${COMPONENT}".yaml "$MANIFEST"
+
+if [ -n "$REGISTRY_IMAGE" ]; then
+    # Registry is set
+    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
+    sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+    sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
+else
+    # Registry is not set
+    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f3)
+    sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_NAME#g" "$MANIFEST"
+    sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Never#g" "$MANIFEST"
+fi
+
+echo "  Deploying '$COMPONENT' component to Kubernetes..."
+DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
+kubectl --namespace $K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
+COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
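+# Scale the deployment down to 0 and back to 1 so the pod is re-created even if a previous deployment was still running.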
+kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
+kubectl --namespace $K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME} >> "$DEPLOY_LOG"
+
+echo "  Collecting env-vars for '$COMPONENT' component..."
+SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME} --namespace $K8S_NAMESPACE -o json)
+
+# Env vars for service's host address
+SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP')
+ENVVAR_HOST=$(echo "${COMPONENT}_SERVICE_HOST" | tr '[:lower:]' '[:upper:]')
+echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT
+
+# Env vars for service's 'grpc' port
+SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port')
+ENVVAR_PORT_GRPC=$(echo "${COMPONENT}_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]')
+echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT
+
+printf "\n"
+
+echo "Waiting for '$COMPONENT' component..."
+kubectl wait --namespace $K8S_NAMESPACE \
+    --for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}
+printf "\n"
+
+echo "Deployment Resources:"
+kubectl --namespace $K8S_NAMESPACE get all
+printf "\n"
+
+echo "Done!"
diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5ef6eae7de6cb7c839b0cb17e65c8b3f045c1d66
--- /dev/null
+++ b/manifests/dltservice.yaml
@@ -0,0 +1,86 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: dltservice
+spec:
+  selector:
+    matchLabels:
+      app: dltservice
+  template:
+    metadata:
+      labels:
+        app: dltservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
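+      # Connector and gateway run side-by-side in the same pod; the Service below exposes only the connector's gRPC port (8080).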
+      - name: connector
+        image: registry.gitlab.com/teraflow-h2020/controller/dlt-connector:latest
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 8080
+        env:
+        - name: LOG_LEVEL
+          value: "INFO"
+        readinessProbe:
+          exec:
+            command: ["/bin/grpc_health_probe", "-addr=:8080"]
+        livenessProbe:
+          exec:
+            command: ["/bin/grpc_health_probe", "-addr=:8080"]
+        resources:
+          requests:
+            cpu: 250m
+            memory: 512Mi
+          limits:
+            cpu: 700m
+            memory: 1024Mi
+      - name: gateway
+        image: registry.gitlab.com/teraflow-h2020/controller/dlt-gateway:latest
+        imagePullPolicy: Always
+        #readinessProbe:
+        #  httpGet:
+        #    path: /health
+        #    port: 8081
+        #  initialDelaySeconds: 5
+        #  timeoutSeconds: 5
+        #livenessProbe:
+        #  httpGet:
+        #    path: /health
+        #    port: 8081
+        #  initialDelaySeconds: 5
+        #  timeoutSeconds: 5
+        resources:
+          requests:
+            cpu: 250m
+            memory: 512Mi
+          limits:
+            cpu: 700m
+            memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: dltservice
+spec:
+  type: ClusterIP
+  selector:
+    app: dltservice
+  ports:
+  - name: grpc
+    protocol: TCP
+    port: 8080
+    targetPort: 8080
diff --git a/manifests/mock_blockchain.yaml b/manifests/mock_blockchain.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b383d7db42be9eb3c9dc7758c230f5250eb43db1
--- /dev/null
+++ b/manifests/mock_blockchain.yaml
@@ -0,0 +1,64 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: mock-blockchain
+spec:
+  selector:
+    matchLabels:
+      app: mock-blockchain
+  template:
+    metadata:
+      labels:
+        app: mock-blockchain
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+      - name: server
+        image: registry.gitlab.com/teraflow-h2020/controller/mock_blockchain:latest
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 50051
+        env:
+        - name: LOG_LEVEL
+          value: "DEBUG"
+        readinessProbe:
+          exec:
+            command: ["/bin/grpc_health_probe", "-addr=:50051"]
+        livenessProbe:
+          exec:
+            command: ["/bin/grpc_health_probe", "-addr=:50051"]
+        resources:
+          requests:
+            cpu: 250m
+            memory: 512Mi
+          limits:
+            cpu: 700m
+            memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: mock-blockchain
+spec:
+  type: ClusterIP
+  selector:
+    app: mock-blockchain
+  ports:
+  - name: grpc
+    protocol: TCP
+    port: 50051
+    targetPort: 50051
diff --git a/proto/dlt_gateway.proto b/proto/dlt_gateway.proto
index b2c1297ccdd4c765862f4643b554d5373d8eccd3..84fe0fef6be366deb9286d49193ddb934c70a55c 100644
--- a/proto/dlt_gateway.proto
+++ b/proto/dlt_gateway.proto
@@ -21,8 +21,8 @@ service DltGatewayService {
   rpc RecordToDlt   (DltRecord                 ) returns (       DltRecordStatus  ) {}
   rpc GetFromDlt    (DltRecordId               ) returns (       DltRecord        ) {}
   rpc SubscribeToDlt(DltRecordSubscription     ) returns (stream DltRecordEvent   ) {}
-  rpc GetDltStatus  (context.TeraFlowController) returns (       DltPeerStatus    ) {}  // NEC is checkig if it is possible
-  rpc GetDltPeers   (context.Empty             ) returns (       DltPeerStatusList) {}  // NEC is checkig if it is possible
+  rpc GetDltStatus  (context.TeraFlowController) returns (       DltPeerStatus    ) {}  // NEC is checking if it is possible
+  rpc GetDltPeers   (context.Empty             ) returns (       DltPeerStatusList) {}  // NEC is checking if it is possible
 }
 
 enum DltRecordTypeEnum {
diff --git a/proto/monitoring.proto b/proto/monitoring.proto
index 8b83afa47b49c130d37dcbcc1024f079ebc2a2fe..9be39db909d915b2a9b5d99b01841db028959543 100644
--- a/proto/monitoring.proto
+++ b/proto/monitoring.proto
@@ -19,24 +19,24 @@ import "context.proto";
 import "kpi_sample_types.proto";
 
 service MonitoringService {
-  rpc SetKpi                (KpiDescriptor      ) returns (KpiId               ) {}
-  rpc DeleteKpi             (KpiId              ) returns (context.Empty       ) {}
-  rpc GetKpiDescriptor      (KpiId              ) returns (KpiDescriptor       ) {}
-  rpc GetKpiDescriptorList  (context.Empty      ) returns (KpiDescriptorList   ) {}
-  rpc IncludeKpi            (Kpi                ) returns (context.Empty       ) {}
-  rpc MonitorKpi            (MonitorKpiRequest  ) returns (context.Empty       ) {}
-  rpc QueryKpiData          (KpiQuery           ) returns (KpiList             ) {}
-  rpc SetKpiSubscription    (SubsDescriptor     ) returns (stream KpiList      ) {}
-  rpc GetSubsDescriptor     (SubscriptionID     ) returns (SubsDescriptor      ) {}
-  rpc GetSubscriptions      (context.Empty      ) returns (SubsIDList          ) {}
-  rpc DeleteSubscription    (SubscriptionID     ) returns (context.Empty       ) {}
-  rpc SetKpiAlarm           (AlarmDescriptor    ) returns (AlarmID             ) {}
-  rpc GetAlarms             (context.Empty      ) returns (AlarmIDList         ) {}
-  rpc GetAlarmDescriptor    (AlarmID            ) returns (AlarmDescriptor     ) {}
-  rpc GetAlarmResponseStream(AlarmSubscription  ) returns (stream AlarmResponse) {}
-  rpc DeleteAlarm           (AlarmID            ) returns (context.Empty       ) {}
-  rpc GetStreamKpi          (KpiId              ) returns (stream Kpi          ) {}
-  rpc GetInstantKpi         (KpiId              ) returns (KpiList             ) {}
+  rpc SetKpi                (KpiDescriptor      ) returns (KpiId               ) {} // Stable not final
+  rpc DeleteKpi             (KpiId              ) returns (context.Empty       ) {} // Stable and final
+  rpc GetKpiDescriptor      (KpiId              ) returns (KpiDescriptor       ) {} // Stable and final
+  rpc GetKpiDescriptorList  (context.Empty      ) returns (KpiDescriptorList   ) {} // Stable and final
+  rpc IncludeKpi            (Kpi                ) returns (context.Empty       ) {} // Stable and final
+  rpc MonitorKpi            (MonitorKpiRequest  ) returns (context.Empty       ) {} // Stable and final
+  rpc QueryKpiData          (KpiQuery           ) returns (KpiList             ) {} // Not implemented
+  rpc SetKpiSubscription    (SubsDescriptor     ) returns (stream SubsResponse ) {} // Stable not final
+  rpc GetSubsDescriptor     (SubscriptionID     ) returns (SubsDescriptor      ) {} // Stable and final
+  rpc GetSubscriptions      (context.Empty      ) returns (SubsList            ) {} // Stable and final
+  rpc DeleteSubscription    (SubscriptionID     ) returns (context.Empty       ) {} // Stable and final
+  rpc SetKpiAlarm           (AlarmDescriptor    ) returns (AlarmID             ) {} // Stable not final
+  rpc GetAlarms             (context.Empty      ) returns (AlarmList           ) {} // Stable and final
+  rpc GetAlarmDescriptor    (AlarmID            ) returns (AlarmDescriptor     ) {} // Stable and final
+  rpc GetAlarmResponseStream(AlarmSubscription  ) returns (stream AlarmResponse) {} // Not Stable not final
+  rpc DeleteAlarm           (AlarmID            ) returns (context.Empty       ) {} // Stable and final
+  rpc GetStreamKpi          (KpiId              ) returns (stream Kpi          ) {} // Stable not final
+  rpc GetInstantKpi         (KpiId              ) returns (Kpi                 ) {} // Stable not final
 }
 
 message KpiDescriptor {
@@ -58,7 +58,7 @@ message MonitorKpiRequest {
 }
 
 message KpiQuery {
-  repeated KpiId    kpi_id              = 1;
+  KpiId             kpi_id              = 1;
   float             monitoring_window_s = 2;
   float             sampling_rate_s     = 3;
   uint32            last_n_samples      = 4;  // used when you want something like "get the last N many samples
@@ -99,7 +99,7 @@ message KpiValue {
 
 
 message KpiList {
-  repeated Kpi kpi_list = 1;
+  repeated Kpi kpi = 1;
 }
 
 message KpiDescriptorList {
@@ -122,19 +122,19 @@ message SubscriptionID {
 
 message SubsResponse {
   SubscriptionID   subs_id  = 1;
-  repeated KpiList kpi_list = 2;
+  KpiList          kpi_list = 2;
 }
 
-message SubsIDList {
-  repeated SubscriptionID subs_list = 1;
+message SubsList {
+  repeated SubsDescriptor subs_descriptor = 1;
 }
 
 message AlarmDescriptor {
   AlarmID                     alarm_id              = 1;
   string                      alarm_description     = 2;
   string                      name                  = 3;
-  repeated KpiId              kpi_id                = 4;
-  repeated KpiValueRange      kpi_value_range       = 5;
+  KpiId                       kpi_id                = 4;
+  KpiValueRange               kpi_value_range       = 5;
   context.Timestamp           timestamp             = 6;
 }
 
@@ -143,7 +143,7 @@ message AlarmID{
 }
 
 message AlarmSubscription{
-  AlarmID alarmID                   = 1;
+  AlarmID alarm_id                  = 1;
   float   subscription_timeout_s    = 2;
   float   subscription_frequency_ms = 3;
 }
@@ -151,10 +151,9 @@ message AlarmSubscription{
 message AlarmResponse {
   AlarmID           alarm_id  = 1;
   string            text      = 2;
-  KpiValue          kpi_value = 3;
-  context.Timestamp timestamp = 4;
+  KpiList           kpi_list  = 3;
 }
 
-message AlarmIDList {
-    repeated AlarmID alarm_list = 1;
+message AlarmList {
+    repeated AlarmDescriptor alarm_descriptor = 1;
 }
diff --git a/src/common/Constants.py b/src/common/Constants.py
index 955c4b8bddd8787268f6ca14f5952b25e87705af..67ae5e9d0b8ce4e4da1c5a1c545f83ce2ade5990 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -50,6 +50,9 @@ class ServiceNameEnum(Enum):
     PATHCOMP      = 'pathcomp'
     WEBUI         = 'webui'
 
+    # Used for testing and debugging only
+    DLT_GATEWAY   = 'dlt-gateway'
+
 # Default gRPC service ports
 DEFAULT_SERVICE_GRPC_PORTS = {
     ServiceNameEnum.CONTEXT      .value :  1010,
@@ -65,6 +68,9 @@ DEFAULT_SERVICE_GRPC_PORTS = {
     ServiceNameEnum.L3AM         .value : 10002,  
     ServiceNameEnum.INTERDOMAIN  .value : 10010,
     ServiceNameEnum.PATHCOMP     .value : 10020,
+
+    # Used for testing and debugging only
+    ServiceNameEnum.DLT_GATEWAY  .value : 50051,
 }
 
 # Default HTTP/REST-API service ports
diff --git a/src/common/tests/MockMessageBroker.py b/src/common/tests/MockMessageBroker.py
new file mode 100644
index 0000000000000000000000000000000000000000..851c06766fd705bee746840f3d4ce9c4f4ac404d
--- /dev/null
+++ b/src/common/tests/MockMessageBroker.py
@@ -0,0 +1,61 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, threading, time
+from queue import Queue, Empty
+from typing import Dict, Iterator, NamedTuple, Set
+
+LOGGER = logging.getLogger(__name__)
+CONSUME_TIMEOUT = 0.1 # seconds
+
+class Message(NamedTuple):
+    topic: str
+    content: str
+
+class MockMessageBroker:
+    def __init__(self):
+        self._terminate = threading.Event()
+        self._topic__to__queues : Dict[str, Set[Queue]] = {}
+
+    def publish(self, message : Message) -> None:
+        queues = self._topic__to__queues.get(message.topic, None)
+        if queues is None: return
+        for queue in queues: queue.put_nowait((message.topic, message.content))
+
+    def consume(
+        self, topic_names : Set[str], block : bool = True, consume_timeout : float = CONSUME_TIMEOUT
+    ) -> Iterator[Message]:
+        queue = Queue()
+        for topic_name in topic_names:
+            self._topic__to__queues.setdefault(topic_name, set()).add(queue)
+
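+        # Poll the queue with a short timeout so the generator can notice terminate() and stop promptly.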
+        while not self._terminate.is_set():
+            try:
+                message = queue.get(block=block, timeout=consume_timeout)
+            except Empty:
+                continue
+            if message is None: continue
+            yield Message(*message)
+
+        for topic_name in topic_names:
+            self._topic__to__queues.get(topic_name, set()).discard(queue)
+
+    def terminate(self):
+        self._terminate.set()
+
+def notify_event(messagebroker, topic_name, event_type, fields) -> None:
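+    # Build an event dict mirroring the gRPC *Event messages (a nested 'event' with timestamp and event_type, plus the entity id fields) and publish it as JSON.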
+    event = {'event': {'timestamp': {'timestamp': time.time()}, 'event_type': event_type}}
+    for field_name, field_value in fields.items():
+        event[field_name] = field_value
+    messagebroker.publish(Message(topic_name, json.dumps(event)))
diff --git a/src/common/tests/MockServicerImpl_Context.py b/src/common/tests/MockServicerImpl_Context.py
index 9f80fdbcab0419072a4299f908a7b637038c2a1b..c56ed382adad4b2daa2e3d61575d2973f02bfbe2 100644
--- a/src/common/tests/MockServicerImpl_Context.py
+++ b/src/common/tests/MockServicerImpl_Context.py
@@ -12,22 +12,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc, logging
+import grpc, json, logging
 from typing import Any, Dict, Iterator, List
 from common.proto.context_pb2 import (
     Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
     Context, ContextEvent, ContextId, ContextIdList, ContextList,
     Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
-    Empty,
+    Empty, EventTypeEnum,
     Link, LinkEvent, LinkId, LinkIdList, LinkList,
     Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
     Slice, SliceEvent, SliceId, SliceIdList, SliceList,
     Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
 from common.proto.context_pb2_grpc import ContextServiceServicer
-from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tests.MockMessageBroker import MockMessageBroker, notify_event
+from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
 
 LOGGER = logging.getLogger(__name__)
 
+TOPIC_CONNECTION = 'connection'
+TOPIC_CONTEXT    = 'context'
+TOPIC_TOPOLOGY   = 'topology'
+TOPIC_DEVICE     = 'device'
+TOPIC_LINK       = 'link'
+TOPIC_SERVICE    = 'service'
+TOPIC_SLICE      = 'slice'
+
 def get_container(database : Dict[str, Dict[str, Any]], container_name : str) -> Dict[str, Any]:
     return database.setdefault(container_name, {})
 
@@ -35,10 +44,15 @@ def get_entries(database : Dict[str, Dict[str, Any]], container_name : str) -> L
     container = get_container(database, container_name)
     return [container[entry_uuid] for entry_uuid in sorted(container.keys())]
 
+def has_entry(database : Dict[str, Dict[str, Any]], container_name : str, entry_uuid : str) -> Any:
+    LOGGER.debug('[has_entry] BEFORE database={:s}'.format(str(database)))
+    container = get_container(database, container_name)
+    return entry_uuid in container
+
 def get_entry(
     context : grpc.ServicerContext, database : Dict[str, Dict[str, Any]], container_name : str, entry_uuid : str
 ) -> Any:
-    LOGGER.debug('[get_entry] AFTER database={:s}'.format(str(database)))
+    LOGGER.debug('[get_entry] BEFORE database={:s}'.format(str(database)))
     container = get_container(database, container_name)
     if entry_uuid not in container:
         context.abort(grpc.StatusCode.NOT_FOUND, str('{:s}({:s}) not found'.format(container_name, entry_uuid)))
@@ -64,8 +78,27 @@ class MockServicerImpl_Context(ContextServiceServicer):
     def __init__(self):
         LOGGER.info('[__init__] Creating Servicer...')
         self.database : Dict[str, Any] = {}
+        self.msg_broker = MockMessageBroker()
         LOGGER.info('[__init__] Servicer Created')
 
+    # ----- Common -----------------------------------------------------------------------------------------------------
+
+    def _set(self, request, container_name, entry_uuid, entry_id_field_name, topic_name):
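+        # Store the entry, emit CREATE or UPDATE depending on whether it already existed, and notify subscribers through the mock message broker.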
+        exists = has_entry(self.database, container_name, entry_uuid)
+        entry = set_entry(self.database, container_name, entry_uuid, request)
+        event_type = EventTypeEnum.EVENTTYPE_UPDATE if exists else EventTypeEnum.EVENTTYPE_CREATE
+        entry_id = getattr(entry, entry_id_field_name)
+        dict_entry_id = grpc_message_to_json(entry_id)
+        notify_event(self.msg_broker, topic_name, event_type, {entry_id_field_name: dict_entry_id})
+        return entry_id
+
+    def _del(self, request, container_name, entry_uuid, entry_id_field_name, topic_name, grpc_context):
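+        # Remove the entry and notify subscribers with a REMOVE event carrying the entry identifier.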
+        empty = del_entry(grpc_context, self.database, container_name, entry_uuid)
+        event_type = EventTypeEnum.EVENTTYPE_REMOVE
+        dict_entry_id = grpc_message_to_json(request)
+        notify_event(self.msg_broker, topic_name, event_type, {entry_id_field_name: dict_entry_id})
+        return empty
+
     # ----- Context ----------------------------------------------------------------------------------------------------
 
     def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList:
@@ -82,14 +115,15 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId:
         LOGGER.info('[SetContext] request={:s}'.format(grpc_message_to_json_string(request)))
-        return set_entry(self.database, 'context', request.context_id.context_uuid.uuid, request).context_id
+        return self._set(request, 'context', request.context_id.context_uuid.uuid, 'context_id', TOPIC_CONTEXT)
 
     def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveContext] request={:s}'.format(grpc_message_to_json_string(request)))
-        return del_entry(context, self.database, 'context', request.context_uuid.uuid)
+        return self._del(request, 'context', request.context_uuid.uuid, 'context_id', TOPIC_CONTEXT, context)
 
     def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]:
         LOGGER.info('[GetContextEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_CONTEXT}): yield ContextEvent(**json.loads(message.content))
 
 
     # ----- Topology ---------------------------------------------------------------------------------------------------
@@ -112,15 +146,18 @@ class MockServicerImpl_Context(ContextServiceServicer):
     def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId:
         LOGGER.info('[SetTopology] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'topology[{:s}]'.format(str(request.topology_id.context_id.context_uuid.uuid))
-        return set_entry(self.database, container_name, request.topology_id.topology_uuid.uuid, request).topology_id
+        topology_uuid = request.topology_id.topology_uuid.uuid
+        return self._set(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY)
 
     def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveTopology] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'topology[{:s}]'.format(str(request.context_id.context_uuid.uuid))
-        return del_entry(context, self.database, container_name, request.topology_uuid.uuid)
+        topology_uuid = request.topology_uuid.uuid
+        return self._del(request, container_name, topology_uuid, 'topology_id', TOPIC_TOPOLOGY, context)
 
     def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]:
         LOGGER.info('[GetTopologyEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_TOPOLOGY}): yield TopologyEvent(**json.loads(message.content))
 
 
     # ----- Device -----------------------------------------------------------------------------------------------------
@@ -139,14 +176,15 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetDevice(self, request: Context, context : grpc.ServicerContext) -> DeviceId:
         LOGGER.info('[SetDevice] request={:s}'.format(grpc_message_to_json_string(request)))
-        return set_entry(self.database, 'device', request.device_id.device_uuid.uuid, request).device_id
+        return self._set(request, 'device', request.device_id.device_uuid.uuid, 'device_id', TOPIC_DEVICE)
 
     def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveDevice] request={:s}'.format(grpc_message_to_json_string(request)))
-        return del_entry(context, self.database, 'device', request.device_uuid.uuid)
+        return self._del(request, 'device', request.device_uuid.uuid, 'device_id', TOPIC_DEVICE, context)
 
     def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
         LOGGER.info('[GetDeviceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_DEVICE}): yield DeviceEvent(**json.loads(message.content))
 
 
     # ----- Link -------------------------------------------------------------------------------------------------------
@@ -165,14 +203,15 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetLink(self, request: Context, context : grpc.ServicerContext) -> LinkId:
         LOGGER.info('[SetLink] request={:s}'.format(grpc_message_to_json_string(request)))
-        return set_entry(self.database, 'link', request.link_id.link_uuid.uuid, request).link_id
+        return self._set(request, 'link', request.link_id.link_uuid.uuid, 'link_id', TOPIC_LINK)
 
     def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveLink] request={:s}'.format(grpc_message_to_json_string(request)))
-        return del_entry(context, self.database, 'link', request.link_uuid.uuid)
+        return self._del(request, 'link', request.link_uuid.uuid, 'link_id', TOPIC_LINK, context)
 
     def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]:
         LOGGER.info('[GetLinkEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_LINK}): yield LinkEvent(**json.loads(message.content))
 
 
     # ----- Slice ------------------------------------------------------------------------------------------------------
@@ -226,17 +265,19 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId:
         LOGGER.info('[SetService] request={:s}'.format(grpc_message_to_json_string(request)))
-        return set_entry(
-            self.database, 'service[{:s}]'.format(str(request.service_id.context_id.context_uuid.uuid)),
-            request.service_id.service_uuid.uuid, request).service_id
+        container_name = 'service[{:s}]'.format(str(request.service_id.context_id.context_uuid.uuid))
+        service_uuid = request.service_id.service_uuid.uuid
+        return self._set(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE)
 
     def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveService] request={:s}'.format(grpc_message_to_json_string(request)))
         container_name = 'service[{:s}]'.format(str(request.context_id.context_uuid.uuid))
-        return del_entry(context, self.database, container_name, request.service_uuid.uuid)
+        service_uuid = request.service_uuid.uuid
+        return self._del(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE, context)
 
     def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
         LOGGER.info('[GetServiceEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_SERVICE}): yield ServiceEvent(**json.loads(message.content))
 
 
     # ----- Connection -------------------------------------------------------------------------------------------------
@@ -259,21 +300,21 @@ class MockServicerImpl_Context(ContextServiceServicer):
 
     def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId:
         LOGGER.info('[SetConnection] request={:s}'.format(grpc_message_to_json_string(request)))
-        service_connection__container_name = 'service_connection[{:s}/{:s}]'.format(
+        container_name = 'service_connection[{:s}/{:s}]'.format(
             str(request.service_id.context_id.context_uuid.uuid), str(request.service_id.service_uuid.uuid))
-        set_entry(
-            self.database, service_connection__container_name, request.connection_id.connection_uuid.uuid, request)
-        return set_entry(
-            self.database, 'connection', request.connection_id.connection_uuid.uuid, request).connection_id
+        connection_uuid = request.connection_id.connection_uuid.uuid
+        set_entry(self.database, container_name, connection_uuid, request)
+        return self._set(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION)
 
     def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty:
         LOGGER.info('[RemoveConnection] request={:s}'.format(grpc_message_to_json_string(request)))
         connection = get_entry(context, self.database, 'connection', request.connection_uuid.uuid)
-        service_id = connection.service_id
-        service_connection__container_name = 'service_connection[{:s}/{:s}]'.format(
-            str(service_id.context_id.context_uuid.uuid), str(service_id.service_uuid.uuid))
-        del_entry(context, self.database, service_connection__container_name, request.connection_uuid.uuid)
-        return del_entry(context, self.database, 'connection', request.connection_uuid.uuid)
+        container_name = 'service_connection[{:s}/{:s}]'.format(
+            str(connection.service_id.context_id.context_uuid.uuid), str(connection.service_id.service_uuid.uuid))
+        connection_uuid = request.connection_uuid.uuid
+        del_entry(context, self.database, container_name, connection_uuid)
+        return self._del(request, 'connection', connection_uuid, 'connection_id', TOPIC_CONNECTION, context)
 
     def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]:
         LOGGER.info('[GetConnectionEvents] request={:s}'.format(grpc_message_to_json_string(request)))
+        for message in self.msg_broker.consume({TOPIC_CONNECTION}): yield ConnectionEvent(**json.loads(message.content))
diff --git a/src/common/tests/MockServicerImpl_DltGateway.py b/src/common/tests/MockServicerImpl_DltGateway.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d750168238b2a041badd1974f27e57f62363d90
--- /dev/null
+++ b/src/common/tests/MockServicerImpl_DltGateway.py
@@ -0,0 +1,165 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, itertools, json, logging, time
+from typing import Any, Dict, Iterator, Optional, Tuple
+from common.tests.MockMessageBroker import Message, MockMessageBroker
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.proto.context_pb2 import EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Empty, TeraFlowController
+from common.proto.dlt_gateway_pb2 import (
+    DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_DELETE, DLTRECORDOPERATION_UNDEFINED, DLTRECORDOPERATION_UPDATE,
+    DLTRECORDSTATUS_FAILED, DLTRECORDSTATUS_SUCCEEDED, DLTRECORDTYPE_CONTEXT, DLTRECORDTYPE_DEVICE, DLTRECORDTYPE_LINK,
+    DLTRECORDTYPE_SERVICE, DLTRECORDTYPE_SLICE, DLTRECORDTYPE_TOPOLOGY, DLTRECORDTYPE_UNDEFINED,
+    DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordOperationEnum, DltRecordStatus,
+    DltRecordSubscription, DltRecordTypeEnum)
+from common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceServicer
+
+LOGGER = logging.getLogger(__name__)
+
+DltRecordKey  = Tuple[str, Any, str]                    # domain_uuid, record_type, record_uuid
+DltRecordDict = Dict[str, Dict[str, Dict[str, Dict]]]   # domain_uuid => record_type => record_uuid => JSON-decoded record data
+
+class AlreadyExistsException(Exception):
+    pass
+
+class DoesNotExistException(Exception):
+    pass
+
+class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
+    def __init__(self):
+        LOGGER.info('[__init__] Creating Servicer...')
+        self.records : DltRecordDict = {}
+        self.msg_broker = MockMessageBroker()
+        LOGGER.info('[__init__] Servicer Created')
+
+    def __get_record(self, record_id : DltRecordId, should_exist : bool) -> Optional[Dict]:
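+        # Records are kept as nested dicts: domain_uuid -> record_type -> record_uuid -> JSON-decoded data.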
+        domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid
+        str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '')
+        records_domain : Dict[str, Dict] = self.records.setdefault(domain_uuid, {})
+        records_type   : Dict[str, Dict] = records_domain.setdefault(str_type, {})
+        record         : Optional[Dict] = records_type.get(record_uuid)
+        if should_exist and record is None:
+            raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
+        elif not should_exist and record is not None:
+            raise AlreadyExistsException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
+        return record
+
+    def __set_record(self, record_id : DltRecordId, should_exist : bool, data_json : str) -> None:
+        domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid
+        str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '')
+        records_domain : Dict[str, Dict] = self.records.setdefault(domain_uuid, {})
+        records_type   : Dict[str, Dict] = records_domain.setdefault(str_type, {})
+        record         : Optional[Dict] = records_type.get(record_uuid)
+        if should_exist and record is None:
+            raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
+        elif not should_exist and record is not None:
+            raise AlreadyExistsException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
+        records_type[record_uuid] = json.loads(data_json)
+
+    def __del_record(self, record_id : DltRecordId) -> None:
+        domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid
+        str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '')
+        records_domain : Dict[str, Dict] = self.records.setdefault(domain_uuid, {})
+        records_type   : Dict[str, Dict] = records_domain.setdefault(str_type, {})
+        record         : Optional[Dict] = records_type.get(record_uuid)
+        if record is None:
+            raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
+        records_type.pop(record_uuid, None)
+
+    def __publish(self, operation : DltRecordOperationEnum, record_id : DltRecordId) -> None:
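+        # Topics are named '<RECORD_TYPE>:<OPERATION>' so that SubscribeToDlt can compose the exact set of combinations to listen to.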
+        str_operation = DltRecordOperationEnum.Name(operation).upper().replace('DLTRECORDOPERATION_', '')
+        str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '')
+        topic = '{:s}:{:s}'.format(str_type, str_operation)
+        event = DltRecordEvent()
+        event.event.timestamp.timestamp = time.time()       # pylint: disable=no-member
+        event.event.event_type = {                          # pylint: disable=no-member
+            DLTRECORDOPERATION_ADD   : EVENTTYPE_CREATE,
+            DLTRECORDOPERATION_UPDATE: EVENTTYPE_UPDATE,
+            DLTRECORDOPERATION_DELETE: EVENTTYPE_REMOVE,
+        }.get(operation)
+        event.record_id.CopyFrom(record_id)                 # pylint: disable=no-member
+        self.msg_broker.publish(Message(topic=topic, content=grpc_message_to_json_string(event)))
+
+    def RecordToDlt(self, request : DltRecord, context : grpc.ServicerContext) -> DltRecordStatus:
+        LOGGER.info('[RecordToDlt] request={:s}'.format(grpc_message_to_json_string(request)))
+        record_id = request.record_id
+        response = DltRecordStatus()
+        response.record_id.CopyFrom(record_id)              # pylint: disable=no-member
+        try:
+            operation : DltRecordOperationEnum = request.operation
+            if operation == DLTRECORDOPERATION_ADD:
+                self.__set_record(record_id, False, request.data_json)
+            elif operation == DLTRECORDOPERATION_UPDATE:
+                self.__set_record(record_id, True, request.data_json)
+            elif operation == DLTRECORDOPERATION_DELETE:
+                self.__del_record(record_id)
+            else:
+                str_operation = DltRecordOperationEnum.Name(operation).upper().replace('DLTRECORDOPERATION_', '')
+                raise NotImplementedError('DltRecordOperationEnum({:s})'.format(str_operation))
+            self.__publish(operation, record_id)
+            response.status = DLTRECORDSTATUS_SUCCEEDED
+        except Exception as e: # pylint: disable=broad-except
+            response.status = DLTRECORDSTATUS_FAILED
+            response.error_message = str(e)
+        LOGGER.info('[RecordToDlt] response={:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    def GetFromDlt(self, request : DltRecordId, context : grpc.ServicerContext) -> DltRecord:
+        LOGGER.info('[GetFromDlt] request={:s}'.format(grpc_message_to_json_string(request)))
+        record = self.__get_record(request, True)
+        response = DltRecord()
+        response.record_id.CopyFrom(request) # pylint: disable=no-member
+        response.operation = DLTRECORDOPERATION_UNDEFINED
+        response.data_json = json.dumps(record, sort_keys=True)
+        LOGGER.info('[GetFromDlt] response={:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    def SubscribeToDlt(
+        self, request: DltRecordSubscription, context : grpc.ServicerContext
+    ) -> Iterator[DltRecordEvent]:
+        LOGGER.info('[SubscribeToDlt] request={:s}'.format(grpc_message_to_json_string(request)))
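+        # Empty type/operation lists in the subscription are interpreted as "subscribe to all types/operations".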
+        types = request.type
+        if len(types) == 0:
+            types = [
+                DLTRECORDTYPE_UNDEFINED, DLTRECORDTYPE_CONTEXT, DLTRECORDTYPE_TOPOLOGY, DLTRECORDTYPE_DEVICE,
+                DLTRECORDTYPE_LINK, DLTRECORDTYPE_SERVICE, DLTRECORDTYPE_SLICE
+            ]
+        str_types = [
+            DltRecordTypeEnum.Name(_type).upper().replace('DLTRECORDTYPE_', '')
+            for _type in types
+        ]
+        operations = request.operation
+        if len(operations) == 0:
+            operations = [
+                DLTRECORDOPERATION_UNDEFINED, DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UPDATE,
+                DLTRECORDOPERATION_DELETE
+            ]
+        str_operations = [
+            DltRecordOperationEnum.Name(_operation).upper().replace('DLTRECORDOPERATION_', '')
+            for _operation in operations
+        ]
+        topics = {
+            '{:s}:{:s}'.format(*type_operation)
+            for type_operation in itertools.product(str_types, str_operations)
+        }
+        for message in self.msg_broker.consume(topics):
+            yield DltRecordEvent(**json.loads(message.content))
+
+    def GetDltStatus(self, request : TeraFlowController, context : grpc.ServicerContext) -> DltPeerStatus:
+        LOGGER.info('[GetDltStatus] request={:s}'.format(grpc_message_to_json_string(request)))
+        raise NotImplementedError()
+
+    def GetDltPeers(self, request : Empty, context : grpc.ServicerContext) -> DltPeerStatusList:
+        LOGGER.info('[GetDltPeers] request={:s}'.format(grpc_message_to_json_string(request)))
+        raise NotImplementedError()
diff --git a/src/common/tools/object_factory/Link.py b/src/common/tools/object_factory/Link.py
index 922a39dbe24f4f4b635f378180ab13c80322801b..12c233464f575fefdaa13afe457ca1ae500f15b6 100644
--- a/src/common/tools/object_factory/Link.py
+++ b/src/common/tools/object_factory/Link.py
@@ -13,15 +13,21 @@
 # limitations under the License.
 
 import copy
-from typing import Dict, List
+from typing import Dict, List, Tuple
 
 def get_link_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str:
     return '{:s}/{:s}=={:s}/{:s}'.format(
         a_endpoint_id['device_id']['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'],
         z_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid'])
 
-def json_link_id(link_uuid : str):
+def json_link_id(link_uuid : str) -> Dict:
     return {'link_uuid': {'uuid': link_uuid}}
 
-def json_link(link_uuid : str, endpoint_ids : List[Dict]):
+def json_link(link_uuid : str, endpoint_ids : List[Dict]) -> Dict:
     return {'link_id': json_link_id(link_uuid), 'link_endpoint_ids': copy.deepcopy(endpoint_ids)}
+
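+# Convenience helper: builds both the link_id and the link descriptor for a link between two endpoints.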
+def compose_link(endpoint_a, endpoint_z) -> Tuple[Dict, Dict]:
+    link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
+    link_id   = json_link_id(link_uuid)
+    link      = json_link(link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']])
+    return link_id, link
diff --git a/src/context/client/EventsCollector.py b/src/context/client/EventsCollector.py
index 14a297231f757771beb2c01bc557e5e3de0defb0..9715098bd3cd979d78a83b4839e40613d3997d1e 100644
--- a/src/context/client/EventsCollector.py
+++ b/src/context/client/EventsCollector.py
@@ -22,26 +22,57 @@ LOGGER.setLevel(logging.DEBUG)
 
 class EventsCollector:
     def __init__(
-        self, context_client_grpc : ContextClient, log_events_received=False
+        self, context_client          : ContextClient,
+        log_events_received           : bool = False,
+        activate_context_collector    : bool = True,
+        activate_topology_collector   : bool = True,
+        activate_device_collector     : bool = True,
+        activate_link_collector       : bool = True,
+        activate_service_collector    : bool = True,
+        activate_slice_collector      : bool = True,
+        activate_connection_collector : bool = True,
     ) -> None:
         self._events_queue = queue.Queue()
         self._log_events_received = log_events_received
 
-        self._context_stream    = context_client_grpc.GetContextEvents(Empty())
-        self._topology_stream   = context_client_grpc.GetTopologyEvents(Empty())
-        self._device_stream     = context_client_grpc.GetDeviceEvents(Empty())
-        self._link_stream       = context_client_grpc.GetLinkEvents(Empty())
-        self._service_stream    = context_client_grpc.GetServiceEvents(Empty())
-        self._slice_stream      = context_client_grpc.GetSliceEvents(Empty())
-        self._connection_stream = context_client_grpc.GetConnectionEvents(Empty())
-
-        self._context_thread    = threading.Thread(target=self._collect, args=(self._context_stream   ,), daemon=False)
-        self._topology_thread   = threading.Thread(target=self._collect, args=(self._topology_stream  ,), daemon=False)
-        self._device_thread     = threading.Thread(target=self._collect, args=(self._device_stream    ,), daemon=False)
-        self._link_thread       = threading.Thread(target=self._collect, args=(self._link_stream      ,), daemon=False)
-        self._service_thread    = threading.Thread(target=self._collect, args=(self._service_stream   ,), daemon=False)
-        self._slice_thread      = threading.Thread(target=self._collect, args=(self._slice_stream     ,), daemon=False)
-        self._connection_thread = threading.Thread(target=self._collect, args=(self._connection_stream,), daemon=False)
+        self._context_stream, self._context_thread = None, None
+        if activate_context_collector:
+            self._context_stream = context_client.GetContextEvents(Empty())
+            self._context_thread = self._create_collector_thread(self._context_stream)
+
+        self._topology_stream, self._topology_thread = None, None
+        if activate_topology_collector:
+            self._topology_stream = context_client.GetTopologyEvents(Empty())
+            self._topology_thread = self._create_collector_thread(self._topology_stream)
+
+        self._device_stream, self._device_thread = None, None
+        if activate_device_collector:
+            self._device_stream = context_client.GetDeviceEvents(Empty())
+            self._device_thread = self._create_collector_thread(self._device_stream)
+
+        self._link_stream, self._link_thread = None, None
+        if activate_link_collector:
+            self._link_stream = context_client.GetLinkEvents(Empty())
+            self._link_thread = self._create_collector_thread(self._link_stream)
+
+        self._service_stream, self._service_thread = None, None
+        if activate_service_collector:
+            self._service_stream = context_client.GetServiceEvents(Empty())
+            self._service_thread = self._create_collector_thread(self._service_stream)
+
+        self._slice_stream, self._slice_thread = None, None
+        if activate_slice_collector:
+            self._slice_stream = context_client.GetSliceEvents(Empty())
+            self._slice_thread = self._create_collector_thread(self._slice_stream)
+
+        self._connection_stream, self._connection_thread = None, None
+        if activate_connection_collector:
+            self._connection_stream = context_client.GetConnectionEvents(Empty())
+            self._connection_thread = self._create_collector_thread(self._connection_stream)
+
+    def _create_collector_thread(self, stream, as_daemon : bool = False):
+        return threading.Thread(target=self._collect, args=(stream,), daemon=as_daemon)
 
     def _collect(self, events_stream) -> None:
         try:
@@ -54,13 +85,13 @@ class EventsCollector:
                 raise # pragma: no cover
 
     def start(self):
-        self._context_thread.start()
-        self._topology_thread.start()
-        self._device_thread.start()
-        self._link_thread.start()
-        self._service_thread.start()
-        self._slice_thread.start()
-        self._connection_thread.start()
+        if self._context_thread    is not None: self._context_thread.start()
+        if self._topology_thread   is not None: self._topology_thread.start()
+        if self._device_thread     is not None: self._device_thread.start()
+        if self._link_thread       is not None: self._link_thread.start()
+        if self._service_thread    is not None: self._service_thread.start()
+        if self._slice_thread      is not None: self._slice_thread.start()
+        if self._connection_thread is not None: self._connection_thread.start()
 
     def get_event(self, block : bool = True, timeout : float = 0.1):
         try:
@@ -83,18 +114,18 @@ class EventsCollector:
         return sorted(events, key=lambda e: e.event.timestamp.timestamp)
 
     def stop(self):
-        self._context_stream.cancel()
-        self._topology_stream.cancel()
-        self._device_stream.cancel()
-        self._link_stream.cancel()
-        self._service_stream.cancel()
-        self._slice_stream.cancel()
-        self._connection_stream.cancel()
-
-        self._context_thread.join()
-        self._topology_thread.join()
-        self._device_thread.join()
-        self._link_thread.join()
-        self._service_thread.join()
-        self._slice_thread.join()
-        self._connection_thread.join()
+        if self._context_stream    is not None: self._context_stream.cancel()
+        if self._topology_stream   is not None: self._topology_stream.cancel()
+        if self._device_stream     is not None: self._device_stream.cancel()
+        if self._link_stream       is not None: self._link_stream.cancel()
+        if self._service_stream    is not None: self._service_stream.cancel()
+        if self._slice_stream      is not None: self._slice_stream.cancel()
+        if self._connection_stream is not None: self._connection_stream.cancel()
+
+        if self._context_thread    is not None: self._context_thread.join()
+        if self._topology_thread   is not None: self._topology_thread.join()
+        if self._device_thread     is not None: self._device_thread.join()
+        if self._link_thread       is not None: self._link_thread.join()
+        if self._service_thread    is not None: self._service_thread.join()
+        if self._slice_thread      is not None: self._slice_thread.join()
+        if self._connection_thread is not None: self._connection_thread.join()
diff --git a/src/device/service/drivers/transport_api/Tools.py b/src/device/service/drivers/transport_api/Tools.py
index 6faee37ce540443b982b523239e37a2edc779659..6ae928eb8f6bf8371dbf28b7ee9cc1995b3c191f 100644
--- a/src/device/service/drivers/transport_api/Tools.py
+++ b/src/device/service/drivers/transport_api/Tools.py
@@ -17,6 +17,12 @@ from device.service.driver_api._Driver import RESOURCE_ENDPOINTS
 
 LOGGER = logging.getLogger(__name__)
 
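+# HTTP status codes accepted as successful replies from the Transport-API endpoint.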
+HTTP_OK_CODES = {
+    200,    # OK
+    201,    # Created
+    202,    # Accepted
+    204,    # No Content
+}
 
 def find_key(resource, key):
     return json.loads(resource[1])[key]
@@ -97,10 +103,10 @@ def create_connectivity_service(
         LOGGER.exception('Exception creating ConnectivityService(uuid={:s}, data={:s})'.format(str(uuid), str(data)))
         results.append(e)
     else:
-        if response.status_code != 201:
+        if response.status_code not in HTTP_OK_CODES:
             msg = 'Could not create ConnectivityService(uuid={:s}, data={:s}). status_code={:s} reply={:s}'
             LOGGER.error(msg.format(str(uuid), str(data), str(response.status_code), str(response)))
-        results.append(response.status_code == 201)
+        results.append(response.status_code in HTTP_OK_CODES)
     return results
 
 def delete_connectivity_service(root_url, timeout, uuid):
@@ -113,8 +119,8 @@ def delete_connectivity_service(root_url, timeout, uuid):
         LOGGER.exception('Exception deleting ConnectivityService(uuid={:s})'.format(str(uuid)))
         results.append(e)
     else:
-        if response.status_code != 201:
+        if response.status_code not in HTTP_OK_CODES:
             msg = 'Could not delete ConnectivityService(uuid={:s}). status_code={:s} reply={:s}'
             LOGGER.error(msg.format(str(uuid), str(response.status_code), str(response)))
-        results.append(response.status_code == 202)
+        results.append(response.status_code in HTTP_OK_CODES)
     return results
diff --git a/src/dlt/.gitlab-ci.yml b/src/dlt/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3c2013f50904eb9cd366bf3e3b3cfce6d10c6fd6
--- /dev/null
+++ b/src/dlt/.gitlab-ci.yml
@@ -0,0 +1,184 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build dlt:
+  variables:
+    IMAGE_NAME: 'dlt' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    # This first build tags the builder resulting image to prevent being removed by dangling image removal command
+    - docker build -t "${IMAGE_NAME}-gateway:$IMAGE_TAG" -f ./src/$IMAGE_NAME/gateway/Dockerfile .
+    - docker build -t "${IMAGE_NAME}-connector:$IMAGE_TAG" -f ./src/$IMAGE_NAME/connector/Dockerfile .
+    - docker tag "${IMAGE_NAME}-gateway:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG"
+    - docker tag "${IMAGE_NAME}-connector:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/.gitlab-ci.yml
+      - src/$IMAGE_NAME/gateway/**/*.{kt,kts,proto,pem,json}
+      - src/$IMAGE_NAME/gateway/build.gradle.kts
+      - src/$IMAGE_NAME/gateway/Dockerfile
+      - src/$IMAGE_NAME/gateway/gradle.properties
+      - src/$IMAGE_NAME/gateway/gradlew
+      - src/$IMAGE_NAME/gateway/gradlew.bat
+      - src/$IMAGE_NAME/gateway/settings.gradle.kts
+      - src/$IMAGE_NAME/connector/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/connector/Dockerfile
+      - src/$IMAGE_NAME/connector/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit test dlt-gateway:
+  variables:
+    IMAGE_NAME: 'dlt' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build dlt
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep ${IMAGE_NAME}-connector; then docker rm -f ${IMAGE_NAME}-connector; else echo "${IMAGE_NAME}-connector image is not in the system"; fi
+    - if docker container ls | grep ${IMAGE_NAME}-gateway; then docker rm -f ${IMAGE_NAME}-gateway; else echo "${IMAGE_NAME}-gateway image is not in the system"; fi
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG"
+    #- docker run --name ${IMAGE_NAME}-gateway -d -p 50051:50051 -v "$PWD/src/${IMAGE_NAME}/gateway/tests:/opt/results" --network=teraflowbridge ${IMAGE_NAME}-gateway:${IMAGE_TAG}
+    - docker run --name ${IMAGE_NAME}-gateway -d -p 50051:50051 --network=teraflowbridge $CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG
+    - sleep 5
+    - docker ps -a
+    - docker logs ${IMAGE_NAME}-gateway
+    #- docker exec -i ${IMAGE_NAME}-gateway bash -c "curl -0 -v -X POST -H 'Expect:' -H 'Content-Type:\ application/json' http://127.0.0.1:8081/dlt/api/v1/compRoute -d @/var/teraflow/tests/pc-req.json"
+    #- docker kill --signal=SIGUSR1 dlt-gateway
+    #- docker exec -i ${IMAGE_NAME}-gateway bash -c "gcovr"
+  #coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker logs ${IMAGE_NAME}-gateway
+    - docker rm -f ${IMAGE_NAME}-gateway
+    - docker network rm teraflowbridge
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/.gitlab-ci.yml
+      - src/$IMAGE_NAME/gateway/**/*.{kt,kts,proto,pem,json}
+      - src/$IMAGE_NAME/gateway/build.gradle.kts
+      - src/$IMAGE_NAME/gateway/Dockerfile
+      - src/$IMAGE_NAME/gateway/gradle.properties
+      - src/$IMAGE_NAME/gateway/gradlew
+      - src/$IMAGE_NAME/gateway/gradlew.bat
+      - src/$IMAGE_NAME/gateway/settings.gradle.kts
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  #artifacts:
+  #    when: always
+  #    reports:
+  #      junit: src/$IMAGE_NAME/gateway/tests/${IMAGE_NAME}-gateway_report.xml
+
+# Apply unit test to the component
+unit test dlt-connector:
+  variables:
+    IMAGE_NAME: 'dlt' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build dlt
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge --subnet=172.28.0.0/24 --gateway=172.28.0.254 teraflowbridge; fi
+    - if docker container ls | grep ${IMAGE_NAME}-connector; then docker rm -f ${IMAGE_NAME}-connector; else echo "${IMAGE_NAME}-connector image is not in the system"; fi
+    - if docker container ls | grep ${IMAGE_NAME}-gateway; then docker rm -f ${IMAGE_NAME}-gateway; else echo "${IMAGE_NAME}-gateway image is not in the system"; fi
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG"
+    - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG"
+    - docker run --name ${IMAGE_NAME}-gateway -d -p 50051:50051 -v "$PWD/src/${IMAGE_NAME}/gateway/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.1 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-gateway:$IMAGE_TAG
+    - sleep 1
+    - docker run --name ${IMAGE_NAME}-connector -d -p 8080:8080 --env "DLT_GATEWAY_HOST=172.28.0.1" --env "DLT_GATEWAY_PORT=50051" -v "$PWD/src/${IMAGE_NAME}/connector/tests:/opt/results" --network=teraflowbridge --ip 172.28.0.2 $CI_REGISTRY_IMAGE/${IMAGE_NAME}-connector:$IMAGE_TAG
+    - sleep 5
+    - docker ps -a
+    - docker logs ${IMAGE_NAME}-connector
+    - docker logs ${IMAGE_NAME}-gateway
+    - docker exec -i ${IMAGE_NAME}-connector bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/connector/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}-connector_report.xml"
+    - docker exec -i ${IMAGE_NAME}-connector bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker ps -a
+    - docker logs ${IMAGE_NAME}-connector
+    - docker logs ${IMAGE_NAME}-gateway
+    - docker rm -f ${IMAGE_NAME}-connector
+    - docker rm -f ${IMAGE_NAME}-gateway
+    - docker network rm teraflowbridge
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/.gitlab-ci.yml
+      - src/$IMAGE_NAME/gateway/**/*.{kt,kts,proto,pem,json}
+      - src/$IMAGE_NAME/gateway/build.gradle.kts
+      - src/$IMAGE_NAME/gateway/Dockerfile
+      - src/$IMAGE_NAME/gateway/gradle.properties
+      - src/$IMAGE_NAME/gateway/gradlew
+      - src/$IMAGE_NAME/gateway/gradlew.bat
+      - src/$IMAGE_NAME/gateway/settings.gradle.kts
+      - src/$IMAGE_NAME/connector/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/connector/Dockerfile
+      - src/$IMAGE_NAME/connector/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/connector/tests/${IMAGE_NAME}-connector_report.xml
+
+# Deployment of the service in Kubernetes Cluster
+deploy dlt:
+  variables:
+    IMAGE_NAME: 'dlt' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: deploy
+  needs:
+    - unit test dlt-gateway
+    - unit test dlt-connector
+    # - integ_test execute
+  script:
+    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+    - kubectl version
+    - kubectl get all
+    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+    - kubectl get all
+  # environment:
+  #   name: test
+  #   url: https://example.com
+  #   kubernetes:
+  #     namespace: test
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
      when: manual
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+      when: manual
diff --git a/src/dlt/connector/Config.py b/src/dlt/connector/Config.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/dlt/connector/Config.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/dlt/connector/Dockerfile b/src/dlt/connector/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..51e9ec506f0c8a6c35ceac68833e3ad683ef8e63
--- /dev/null
+++ b/src/dlt/connector/Dockerfile
@@ -0,0 +1,69 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
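+# Rewrite the generated absolute imports into relative ones so the proto modules work as a sub-package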
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/dlt/connector
+WORKDIR /var/teraflow/dlt/connector
+COPY src/dlt/connector/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/context/. context/
+COPY src/dlt/connector/. dlt/connector/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "dlt.connector.service"]
diff --git a/src/dlt/connector/client/DltConnectorClient.py b/src/dlt/connector/client/DltConnectorClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..f48562996b067ca81a99b6ceb7288029be7ba1c8
--- /dev/null
+++ b/src/dlt/connector/client/DltConnectorClient.py
@@ -0,0 +1,95 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+from common.proto.context_pb2 import DeviceId, Empty, ServiceId, SliceId
+from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceStub
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 15
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
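+# gRPC client for the DLT Connector service; RETRY_DECORATOR re-invokes connect() between retry attempts.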
+class DltConnectorClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.DLT)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.DLT)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = DltConnectorServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def RecordAll(self, request : Empty) -> Empty:
+        LOGGER.debug('RecordAll request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RecordAll(request)
+        LOGGER.debug('RecordAll result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RecordAllDevices(self, request : Empty) -> Empty:
+        LOGGER.debug('RecordAllDevices request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RecordAllDevices(request)
+        LOGGER.debug('RecordAllDevices result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RecordDevice(self, request : DeviceId) -> Empty:
+        LOGGER.debug('RecordDevice request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RecordDevice(request)
+        LOGGER.debug('RecordDevice result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RecordAllServices(self, request : Empty) -> Empty:
+        LOGGER.debug('RecordAllServices request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RecordAllServices(request)
+        LOGGER.debug('RecordAllServices result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RecordService(self, request : ServiceId) -> Empty:
+        LOGGER.debug('RecordService request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RecordService(request)
+        LOGGER.debug('RecordService result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RecordAllSlices(self, request : Empty) -> Empty:
+        LOGGER.debug('RecordAllSlices request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RecordAllSlices(request)
+        LOGGER.debug('RecordAllSlices result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def RecordSlice(self, request : SliceId) -> Empty:
+        LOGGER.debug('RecordSlice request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RecordSlice(request)
+        LOGGER.debug('RecordSlice result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
diff --git a/src/dlt/connector/client/DltEventsCollector.py b/src/dlt/connector/client/DltEventsCollector.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fe2474cead37094c507a8a612181dc7f7243544
--- /dev/null
+++ b/src/dlt/connector/client/DltEventsCollector.py
@@ -0,0 +1,72 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging, queue, threading
+from common.proto.dlt_gateway_pb2 import DltRecordSubscription
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from dlt.connector.client.DltGatewayClient import DltGatewayClient
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
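+# Collects events from the DLT Gateway subscription stream in a background thread and exposes them through a queue.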
+class DltEventsCollector:
+    def __init__(
+        self, dltgateway_client : DltGatewayClient,
+        log_events_received     : bool = False,
+    ) -> None:
+        self._events_queue = queue.Queue()
+        self._log_events_received = log_events_received
+        subscription = DltRecordSubscription() # by default, subscribe to all record types and operations
+        self._dltgateway_stream = dltgateway_client.SubscribeToDlt(subscription)
+        self._dltgateway_thread = self._create_collector_thread(self._dltgateway_stream)
+
+    def _create_collector_thread(self, stream, as_daemon : bool = False):
+        return threading.Thread(target=self._collect, args=(stream,), daemon=as_daemon)
+
+    def _collect(self, events_stream) -> None:
+        try:
+            for event in events_stream:
+                if self._log_events_received:
+                    LOGGER.info('[_collect] event: {:s}'.format(grpc_message_to_json_string(event)))
+                self._events_queue.put_nowait(event)
+        except grpc.RpcError as e:
+            if e.code() != grpc.StatusCode.CANCELLED: # pylint: disable=no-member
+                raise # pragma: no cover
+
+    def start(self):
+        if self._dltgateway_thread is not None: self._dltgateway_thread.start()
+
+    def get_event(self, block : bool = True, timeout : float = 0.1):
+        try:
+            return self._events_queue.get(block=block, timeout=timeout)
+        except queue.Empty: # pylint: disable=catching-non-exception
+            return None
+
+    def get_events(self, block : bool = True, timeout : float = 0.1, count : int = None):
+        events = []
+        if count is None:
+            while True:
+                event = self.get_event(block=block, timeout=timeout)
+                if event is None: break
+                events.append(event)
+        else:
+            for _ in range(count):
+                event = self.get_event(block=block, timeout=timeout)
+                if event is None: continue
+                events.append(event)
+        return sorted(events, key=lambda e: e.event.timestamp.timestamp)
+
+    def stop(self):
+        if self._dltgateway_stream is not None: self._dltgateway_stream.cancel()
+        if self._dltgateway_thread is not None: self._dltgateway_thread.join()
diff --git a/src/dlt/connector/client/DltGatewayClient.py b/src/dlt/connector/client/DltGatewayClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1f8dec391bb836cea33422176730d250090429d
--- /dev/null
+++ b/src/dlt/connector/client/DltGatewayClient.py
@@ -0,0 +1,84 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Iterator
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+from common.proto.context_pb2 import Empty, TeraFlowController
+from common.proto.dlt_gateway_pb2 import (
+    DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordStatus, DltRecordSubscription)
+from common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceStub
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 15
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
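+# gRPC client exposing the DLT Gateway RPCs (record, get, subscribe, status, peers).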
+class DltGatewayClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.DLT)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.DLT)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = DltGatewayServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def RecordToDlt(self, request : DltRecord) -> DltRecordStatus:
+        LOGGER.debug('RecordToDlt request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RecordToDlt(request)
+        LOGGER.debug('RecordToDlt result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def GetFromDlt(self, request : DltRecordId) -> DltRecord:
+        LOGGER.debug('GetFromDlt request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetFromDlt(request)
+        LOGGER.debug('GetFromDlt result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def SubscribeToDlt(self, request : DltRecordSubscription) -> Iterator[DltRecordEvent]:
+        LOGGER.debug('SubscribeToDlt request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SubscribeToDlt(request)
+        LOGGER.debug('SubscribeToDlt result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def GetDltStatus(self, request : TeraFlowController) -> DltPeerStatus:
+        LOGGER.debug('GetDltStatus request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetDltStatus(request)
+        LOGGER.debug('GetDltStatus result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def GetDltPeers(self, request : Empty) -> DltPeerStatusList:
+        LOGGER.debug('GetDltPeers request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetDltPeers(request)
+        LOGGER.debug('GetDltPeers result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
diff --git a/src/dlt/connector/client/__init__.py b/src/dlt/connector/client/__init__.py
index 70a33251242c51f49140e596b8208a19dd5245f7..9953c820575d42fa88351cc8de022d880ba96e6a 100644
--- a/src/dlt/connector/client/__init__.py
+++ b/src/dlt/connector/client/__init__.py
@@ -11,4 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
diff --git a/src/dlt/connector/main_test.py b/src/dlt/connector/main_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ad90eb35444b7ba4de00159372e466e8fc68905
--- /dev/null
+++ b/src/dlt/connector/main_test.py
@@ -0,0 +1,43 @@
+# pip install grpcio==1.47.0 grpcio-tools==1.47.0 protobuf==3.20.1
+# PYTHONPATH=/home/cttc/teraflow/src python -m dlt.connector.main_test
+
+import logging, sys, time
+from common.proto.dlt_gateway_pb2 import DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UPDATE, DLTRECORDTYPE_DEVICE, DltRecord
+from common.tools.object_factory.Device import json_device
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.proto.context_pb2 import DEVICEOPERATIONALSTATUS_ENABLED, Device
+from .client.DltGatewayClient import DltGatewayClient
+from .client.DltEventsCollector import DltEventsCollector
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+def main():
+    dltgateway_client = DltGatewayClient(host='127.0.0.1', port=50051)
+    dltgateway_collector = DltEventsCollector(dltgateway_client, log_events_received=True)
+    dltgateway_collector.start()
+
+    time.sleep(3)
+
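+    # Create an emulated device, push it to the DLT as an ADD record, then read it back by record_id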
+    device = Device(**json_device('dev-1', 'packet-router', DEVICEOPERATIONALSTATUS_ENABLED))
+
+    r2dlt_req = DltRecord()
+    r2dlt_req.record_id.domain_uuid.uuid = 'tfs-a'
+    r2dlt_req.record_id.type             = DLTRECORDTYPE_DEVICE
+    r2dlt_req.record_id.record_uuid.uuid = device.device_id.device_uuid.uuid
+    r2dlt_req.operation                  = DLTRECORDOPERATION_ADD
+    r2dlt_req.data_json                  = grpc_message_to_json_string(device)
+    LOGGER.info('r2dlt_req = {:s}'.format(grpc_message_to_json_string(r2dlt_req)))
+    r2dlt_rep = dltgateway_client.RecordToDlt(r2dlt_req)
+    LOGGER.info('r2dlt_rep = {:s}'.format(grpc_message_to_json_string(r2dlt_rep)))
+
+    dlt2r_req = r2dlt_req.record_id
+    LOGGER.info('dlt2r_req = {:s}'.format(grpc_message_to_json_string(dlt2r_req)))
+    dlt2r_rep = dltgateway_client.GetFromDlt(dlt2r_req)
+    LOGGER.info('dlt2r_rep = {:s}'.format(grpc_message_to_json_string(dlt2r_rep)))
+
+    dltgateway_collector.stop()
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/dlt/connector/requirements.in b/src/dlt/connector/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dlt/connector/service/DltConnector.py b/src/dlt/connector/service/DltConnector.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c42d66852e8eb895a07c761f7535a0d768a9e91
--- /dev/null
+++ b/src/dlt/connector/service/DltConnector.py
@@ -0,0 +1,51 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, threading
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from dlt.connector.client.DltConnectorClient import DltConnectorClient
+
+LOGGER = logging.getLogger(__name__)
+
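+# Subscribes to Context events in a background thread; for now it only logs the events it receives.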
+class DltConnector:
+    def __init__(self) -> None:
+        LOGGER.debug('Creating connector...')
+        self._terminate = threading.Event()
+        self._thread = None
+        LOGGER.debug('Connector created')
+
+    def start(self):
+        self._terminate.clear()
+        self._thread = threading.Thread(target=self._run_events_collector)
+        self._thread.start()
+
+    def _run_events_collector(self) -> None:
+        dltconnector_client = DltConnectorClient()
+        context_client = ContextClient()
+        events_collector = EventsCollector(context_client)
+        events_collector.start()
+
+        while not self._terminate.is_set():
+            event = events_collector.get_event()
+            LOGGER.info('Event from Context Received: {:s}'.format(grpc_message_to_json_string(event)))
+
+        events_collector.stop()
+        context_client.close()
+        dltconnector_client.close()
+
+    def stop(self):
+        self._terminate.set()
+        self._thread.join()
diff --git a/src/dlt/connector/service/DltConnectorService.py b/src/dlt/connector/service/DltConnectorService.py
new file mode 100644
index 0000000000000000000000000000000000000000..40237b628776f7053092b45d036072fbde35253c
--- /dev/null
+++ b/src/dlt/connector/service/DltConnectorService.py
@@ -0,0 +1,28 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.proto.dlt_connector_pb2_grpc import add_DltConnectorServiceServicer_to_server
+from .DltConnectorServiceServicerImpl import DltConnectorServiceServicerImpl
+
+class DltConnectorService(GenericGrpcService):
+    def __init__(self, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.DLT)
+        super().__init__(port, cls_name=cls_name)
+        self.dltconnector_servicer = DltConnectorServiceServicerImpl()
+
+    def install_servicers(self):
+        add_DltConnectorServiceServicer_to_server(self.dltconnector_servicer, self.server)
diff --git a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..860e46f3ab88b097f4aa8e06508b19518055e46f
--- /dev/null
+++ b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py
@@ -0,0 +1,62 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
+from common.proto.context_pb2 import DeviceId, Empty, ServiceId, SliceId
+from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceServicer
+
+LOGGER = logging.getLogger(__name__)
+
+SERVICE_NAME = 'DltConnector'
+METHOD_NAMES = [
+    'RecordAll',
+    'RecordAllDevices', 'RecordDevice',
+    'RecordAllServices', 'RecordService',
+    'RecordAllSlices', 'RecordSlice',
+]
+METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
+
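+# Placeholder implementation: all Record* RPCs currently return Empty() without recording anything.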
+class DltConnectorServiceServicerImpl(DltConnectorServiceServicer):
+    def __init__(self):
+        LOGGER.debug('Creating Servicer...')
+        LOGGER.debug('Servicer Created')
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RecordAll(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RecordAllDevices(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RecordDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty:
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RecordAllServices(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RecordService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty:
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RecordAllSlices(self, request : Empty, context : grpc.ServicerContext) -> Empty:
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RecordSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty:
+        return Empty()
diff --git a/src/dlt/connector/service/__init__.py b/src/dlt/connector/service/__init__.py
index 70a33251242c51f49140e596b8208a19dd5245f7..9953c820575d42fa88351cc8de022d880ba96e6a 100644
--- a/src/dlt/connector/service/__init__.py
+++ b/src/dlt/connector/service/__init__.py
@@ -11,4 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
diff --git a/src/dlt/connector/service/__main__.py b/src/dlt/connector/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..435a93f61bf934a17d9c044756648176e9cb2d2d
--- /dev/null
+++ b/src/dlt/connector/service/__main__.py
@@ -0,0 +1,65 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from prometheus_client import start_http_server
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+    wait_for_environment_variables)
+from .DltConnectorService import DltConnectorService
+
+terminate = threading.Event()
+LOGGER : logging.Logger = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level)
+    LOGGER = logging.getLogger(__name__)
+
+    wait_for_environment_variables([
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
+        get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+    ])
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
+
+    # Starting DLT connector service
+    grpc_service = DltConnectorService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=0.1): pass
+
+    LOGGER.info('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/dlt/connector/tests/MockService_Dependencies.py b/src/dlt/connector/tests/MockService_Dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..65ddc3cb48cb878b2ab5ba8b5ec44479b0b71451
--- /dev/null
+++ b/src/dlt/connector/tests/MockService_Dependencies.py
@@ -0,0 +1,38 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from typing import Union
+from common.Constants import ServiceNameEnum
+from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name
+from common.proto.dlt_gateway_pb2_grpc import add_DltGatewayServiceServicer_to_server
+from common.tests.MockServicerImpl_DltGateway import MockServicerImpl_DltGateway
+from common.tools.service.GenericGrpcService import GenericGrpcService
+
+LOCAL_HOST = '127.0.0.1'
+
+SERVICE_DLT = ServiceNameEnum.DLT
+
+class MockService_Dependencies(GenericGrpcService):
+    def __init__(self, bind_port: Union[str, int]) -> None:
+        super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')
+
+    # pylint: disable=attribute-defined-outside-init
+    def install_servicers(self):
+        self.dltgateway_servicer = MockServicerImpl_DltGateway()
+        add_DltGatewayServiceServicer_to_server(self.dltgateway_servicer, self.server)
+
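+    # Point the DLT service host/port environment variables at this mock so clients connect to it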
+    def configure_env_vars(self):
+        os.environ[get_env_var_name(SERVICE_DLT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(self.bind_address)
+        os.environ[get_env_var_name(SERVICE_DLT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(self.bind_port)
diff --git a/src/dlt/connector/tests/Objects.py b/src/dlt/connector/tests/Objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..f797e93e6f2f4f6597a667fff61b2b8ba1cbd72a
--- /dev/null
+++ b/src/dlt/connector/tests/Objects.py
@@ -0,0 +1,81 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.tools.object_factory.Context import json_context, json_context_id
+from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled, json_device_id
+from common.tools.object_factory.EndPoint import json_endpoints
+from common.tools.object_factory.Link import compose_link
+from common.tools.object_factory.Topology import json_topology, json_topology_id
+
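+# Builds the device_id, endpoint descriptors, and emulated packet-router device for the given endpoint UUIDs.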
+def compose_device(
+    device_uuid, endpoint_uuids, endpoint_type='copper', endpoint_topology_id=None, endpoint_sample_types=[]
+):
+    device_id = json_device_id(device_uuid)
+    endpoints = [(endpoint_uuid, endpoint_type, endpoint_sample_types) for endpoint_uuid in endpoint_uuids]
+    endpoints = json_endpoints(device_id, endpoints, topology_id=endpoint_topology_id)
+    device = json_device_emulated_packet_router_disabled(device_uuid, endpoints=endpoints)
+    return device_id, endpoints, device
+
+# ===== Domain A =======================================================================================================
+
+# ----- Context --------------------------------------------------------------------------------------------------------
+DA_CONTEXT_ADMIN_ID = json_context_id('A')
+DA_CONTEXT_ADMIN    = json_context('A')
+
+# ----- Topology -------------------------------------------------------------------------------------------------------
+DA_TOPOLOGY_ADMIN_ID = json_topology_id('A', context_id=DA_CONTEXT_ADMIN_ID)
+DA_TOPOLOGY_ADMIN    = json_topology('A', context_id=DA_CONTEXT_ADMIN_ID)
+
+# ----- Devices --------------------------------------------------------------------------------------------------------
+DA_DEVICE_DEV1_ID, DA_DEVICE_DEV1_ENDPOINTS, DA_DEVICE_DEV1 = compose_device('DEV1@A', ['1', '2'])
+DA_DEVICE_DEV2_ID, DA_DEVICE_DEV2_ENDPOINTS, DA_DEVICE_DEV2 = compose_device('DEV2@A', ['1', '2'])
+DA_DEVICE_DEV3_ID, DA_DEVICE_DEV3_ENDPOINTS, DA_DEVICE_DEV3 = compose_device('DEV3@A', ['1', '2'])
+
+# ----- Links ----------------------------------------------------------------------------------------------------------
+DA_LINK_DEV1_DEV2_ID, DA_LINK_DEV1_DEV2 = compose_link(DA_DEVICE_DEV1_ENDPOINTS[0], DA_DEVICE_DEV2_ENDPOINTS[0])
+DA_LINK_DEV1_DEV3_ID, DA_LINK_DEV1_DEV3 = compose_link(DA_DEVICE_DEV1_ENDPOINTS[1], DA_DEVICE_DEV3_ENDPOINTS[0])
+DA_LINK_DEV2_DEV3_ID, DA_LINK_DEV2_DEV3 = compose_link(DA_DEVICE_DEV2_ENDPOINTS[1], DA_DEVICE_DEV3_ENDPOINTS[1])
+
+# ----- Containers -----------------------------------------------------------------------------------------------------
+DA_CONTEXTS   = [DA_CONTEXT_ADMIN]
+DA_TOPOLOGIES = [DA_TOPOLOGY_ADMIN]
+DA_DEVICES    = [DA_DEVICE_DEV1, DA_DEVICE_DEV2, DA_DEVICE_DEV3]
+DA_LINKS      = [DA_LINK_DEV1_DEV2, DA_LINK_DEV1_DEV3, DA_LINK_DEV2_DEV3]
+
+
+# ===== Domain B =======================================================================================================
+
+# ----- Context --------------------------------------------------------------------------------------------------------
+DB_CONTEXT_ADMIN_ID = json_context_id('B')
+DB_CONTEXT_ADMIN    = json_context('B')
+
+# ----- Topology -------------------------------------------------------------------------------------------------------
+DB_TOPOLOGY_ADMIN_ID = json_topology_id('B', context_id=DB_CONTEXT_ADMIN_ID)
+DB_TOPOLOGY_ADMIN    = json_topology('B', context_id=DB_CONTEXT_ADMIN_ID)
+
+# ----- Devices --------------------------------------------------------------------------------------------------------
+DB_DEVICE_DEV1_ID, DB_DEVICE_DEV1_ENDPOINTS, DB_DEVICE_DEV1 = compose_device('DEV1@B', ['1', '2'])
+DB_DEVICE_DEV2_ID, DB_DEVICE_DEV2_ENDPOINTS, DB_DEVICE_DEV2 = compose_device('DEV2@B', ['1', '2'])
+DB_DEVICE_DEV3_ID, DB_DEVICE_DEV3_ENDPOINTS, DB_DEVICE_DEV3 = compose_device('DEV3@B', ['1', '2'])
+
+# ----- Links ----------------------------------------------------------------------------------------------------------
+DB_LINK_DEV1_DEV2_ID, DB_LINK_DEV1_DEV2 = compose_link(DB_DEVICE_DEV1_ENDPOINTS[0], DB_DEVICE_DEV2_ENDPOINTS[0])
+DB_LINK_DEV1_DEV3_ID, DB_LINK_DEV1_DEV3 = compose_link(DB_DEVICE_DEV1_ENDPOINTS[1], DB_DEVICE_DEV3_ENDPOINTS[0])
+DB_LINK_DEV2_DEV3_ID, DB_LINK_DEV2_DEV3 = compose_link(DB_DEVICE_DEV2_ENDPOINTS[1], DB_DEVICE_DEV3_ENDPOINTS[1])
+
+# ----- Containers -----------------------------------------------------------------------------------------------------
+DB_CONTEXTS   = [DB_CONTEXT_ADMIN]
+DB_TOPOLOGIES = [DB_TOPOLOGY_ADMIN]
+DB_DEVICES    = [DB_DEVICE_DEV1, DB_DEVICE_DEV2, DB_DEVICE_DEV3]
+DB_LINKS      = [DB_LINK_DEV1_DEV2, DB_LINK_DEV1_DEV3, DB_LINK_DEV2_DEV3]
diff --git a/src/dlt/connector/tests/PrepareTestScenario.py b/src/dlt/connector/tests/PrepareTestScenario.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c5d1cb5cc1c6868a5b47d929f026deecbe52f52
--- /dev/null
+++ b/src/dlt/connector/tests/PrepareTestScenario.py
@@ -0,0 +1,109 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os, pytest
+from typing import Tuple
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
+from common.orm.Database import Database
+from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBackendEnum
+from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
+from common.message_broker.MessageBroker import MessageBroker
+from context.client.ContextClient import ContextClient
+from context.service.grpc_server.ContextService import ContextService
+from dlt.connector.client.DltConnectorClient import DltConnectorClient
+from dlt.connector.service.DltConnectorService import DltConnectorService
+from .MockService_Dependencies import MockService_Dependencies
+
+LOCAL_HOST = '127.0.0.1'
+MOCKSERVICE_PORT = 10000
+#GRPC_PORT = 10000 + get_service_port_grpc(ServiceNameEnum.CONTEXT) # avoid privileged ports
+#os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+#os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT)
+
+# ===== BlockChain Emulator (Mock DLT Gateway) =========================================================================
+# A single gateway is used for all the domains
+
+@pytest.fixture(scope='session')
+def dltgateway_service():
+    _service = MockService_Dependencies(MOCKSERVICE_PORT)
+    _service.configure_env_vars()
+    _service.start()
+    yield _service
+    _service.stop()
+
+# ===== Domain A (Real Context + Real DLT Connector) ===================================================================
+
+@pytest.fixture(scope='session')
+def context_service_a(): # pylint: disable=redefined-outer-name
+    _database = Database(get_database_backend(backend=DatabaseBackendEnum.INMEMORY))
+    _message_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY))
+    _service = ContextService(_database, _message_broker)
+    _service.start()
+    yield _service
+    _service.stop()
+    _message_broker.terminate()
+
+@pytest.fixture(scope='session')
+def context_client_a(context_service_a : ContextService): # pylint: disable=redefined-outer-name
+    _client = ContextClient(host=context_service_a.bind_address, port=context_service_a.bind_port)
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def dltconnector_service_a():
+    _service = DltConnectorService()
+    _service.bind_port += 1
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def dltconnector_client_a(dltconnector_service_a : DltConnectorService): # pylint: disable=redefined-outer-name
+    _client = DltConnectorClient(host=dltconnector_service_a.bind_address, port=dltconnector_service_a.bind_port)
+    yield _client
+    _client.close()
+
+# ===== Domain B (Real Context + Real DLT Connector) ===================================================================
+
+@pytest.fixture(scope='session')
+def context_service_b(): # pylint: disable=redefined-outer-name
+    _database = Database(get_database_backend(backend=DatabaseBackendEnum.INMEMORY))
+    _message_broker = MessageBroker(get_messagebroker_backend(backend=MessageBrokerBackendEnum.INMEMORY))
+    _service = ContextService(_database, _message_broker)
+    _service.start()
+    yield _service
+    _service.stop()
+    _message_broker.terminate()
+
+@pytest.fixture(scope='session')
+def context_client_b(context_service_b : ContextService): # pylint: disable=redefined-outer-name
+    _client = ContextClient(host=context_service_b.bind_address, port=context_service_b.bind_port)
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def dltconnector_service_b():
+    _service = DltConnectorService()
+    _service.bind_port += 2
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def dltconnector_client_b(dltconnector_service_b : DltConnectorService): # pylint: disable=redefined-outer-name
+    _client = DltConnectorClient(host=dltconnector_service_b.bind_address, port=dltconnector_service_b.bind_port)
+    yield _client
+    _client.close()
diff --git a/src/dlt/connector/tests/__init__.py b/src/dlt/connector/tests/__init__.py
index 70a33251242c51f49140e596b8208a19dd5245f7..9953c820575d42fa88351cc8de022d880ba96e6a 100644
--- a/src/dlt/connector/tests/__init__.py
+++ b/src/dlt/connector/tests/__init__.py
@@ -11,4 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
diff --git a/src/dlt/connector/tests/test_unitary.py b/src/dlt/connector/tests/test_unitary.py
new file mode 100644
index 0000000000000000000000000000000000000000..00c1164e1becde1d56de2a6c53c51160a31fc6f7
--- /dev/null
+++ b/src/dlt/connector/tests/test_unitary.py
@@ -0,0 +1,54 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Link, LinkId, Topology, TopologyId
+from context.client.ContextClient import ContextClient
+# NOTE: the two imports below assume the gateway client and the events collector live in the
+# dlt.connector.client package, next to DltConnectorClient.
+from dlt.connector.client.DltGatewayClient import DltGatewayClient
+from dlt.connector.client.DltEventsCollector import DltEventsCollector
+from .PrepareTestScenario import (
+    # pylint: disable=unused-import
+    MOCKSERVICE_PORT, dltgateway_service,
+    context_service_a, context_client_a, dltconnector_service_a, dltconnector_client_a,
+    context_service_b, context_client_b, dltconnector_service_b, dltconnector_client_b)
+from .Objects import (
+    DA_CONTEXTS, DA_TOPOLOGIES, DA_DEVICES, DA_LINKS,
+    DB_CONTEXTS, DB_TOPOLOGIES, DB_DEVICES, DB_LINKS)
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+def test_create_events(
+    context_client_a : ContextClient,   # pylint: disable=redefined-outer-name
+    context_client_b : ContextClient,   # pylint: disable=redefined-outer-name
+    dltgateway_service):                # pylint: disable=redefined-outer-name
+
+    # Collect the events emitted by the (mock) DLT gateway while the test runs
+    dltgateway_client    = DltGatewayClient(host='127.0.0.1', port=MOCKSERVICE_PORT)
+    dltgateway_collector = DltEventsCollector(dltgateway_client, log_events_received=True)
+    dltgateway_collector.start()
+
+    # Populate the Context database of each domain...
+    for context  in DA_CONTEXTS  : context_client_a.SetContext (Context (**context ))
+    for topology in DA_TOPOLOGIES: context_client_a.SetTopology(Topology(**topology))
+    for device   in DA_DEVICES   : context_client_a.SetDevice  (Device  (**device  ))
+    for link     in DA_LINKS     : context_client_a.SetLink    (Link    (**link    ))
+
+    for context  in DB_CONTEXTS  : context_client_b.SetContext (Context (**context ))
+    for topology in DB_TOPOLOGIES: context_client_b.SetTopology(Topology(**topology))
+    for device   in DB_DEVICES   : context_client_b.SetDevice  (Device  (**device  ))
+    for link     in DB_LINKS     : context_client_b.SetLink    (Link    (**link    ))
+
+    # ... and clean it up again, removing entities in reverse dependency order
+    for link     in DA_LINKS     : context_client_a.RemoveLink    (LinkId    (**link    ['link_id'    ]))
+    for device   in DA_DEVICES   : context_client_a.RemoveDevice  (DeviceId  (**device  ['device_id'  ]))
+    for topology in DA_TOPOLOGIES: context_client_a.RemoveTopology(TopologyId(**topology['topology_id']))
+    for context  in DA_CONTEXTS  : context_client_a.RemoveContext (ContextId (**context ['context_id' ]))
+
+    for link     in DB_LINKS     : context_client_b.RemoveLink    (LinkId    (**link    ['link_id'    ]))
+    for device   in DB_DEVICES   : context_client_b.RemoveDevice  (DeviceId  (**device  ['device_id'  ]))
+    for topology in DB_TOPOLOGIES: context_client_b.RemoveTopology(TopologyId(**topology['topology_id']))
+    for context  in DB_CONTEXTS  : context_client_b.RemoveContext (ContextId (**context ['context_id' ]))
+
+    dltgateway_collector.stop()
diff --git a/src/dlt/gateway/.gitignore b/src/dlt/gateway/.gitignore
index 1de6c650e4e3891fba0a81d585634f635e03a5c4..9ecdb254cd217f06171ac30934a34f898a7d77dc 100644
--- a/src/dlt/gateway/.gitignore
+++ b/src/dlt/gateway/.gitignore
@@ -87,4 +87,4 @@ gradle-app.setting
 .gradletasknamecache
 
 local.properties
-wallet/
\ No newline at end of file
+wallet*/
\ No newline at end of file
diff --git a/src/dlt/gateway/Dockerfile b/src/dlt/gateway/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..92ef8e425f40eaf718c4562c836517128dbb2d6f
--- /dev/null
+++ b/src/dlt/gateway/Dockerfile
@@ -0,0 +1,41 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM zenika/kotlin:1.4-jdk12
+
+# Create the working directory, move into it, and copy the DLT Gateway code
+RUN mkdir -p /var/teraflow/dlt/gateway
+WORKDIR /var/teraflow/dlt/gateway
+COPY src/dlt/gateway/. ./
+
+# Make directory for proto files and copy them
+RUN mkdir proto
+COPY proto/*.proto ./proto/
+
+# Build DLT Gateway
+RUN ./gradlew build
+
+EXPOSE 50051
+
+# Create entrypoint.sh script: add /etc/hosts entries for the Fabric peers/orderer, then launch the gateway
+RUN echo "#!/bin/sh" > /entrypoint.sh
+RUN echo "echo 195.37.154.24 peer0.org1.example.com >> /etc/hosts" >> /entrypoint.sh
+RUN echo "echo 195.37.154.24 peer0.org2.example.com >> /etc/hosts" >> /entrypoint.sh
+RUN echo "echo 195.37.154.24 orderer0.example.com   >> /etc/hosts" >> /entrypoint.sh
+RUN echo "cd /var/teraflow/dlt/gateway" >> /entrypoint.sh
+RUN echo "./gradlew runServer" >> /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
+# Gateway entry point
+ENTRYPOINT ["sh", "/entrypoint.sh"]
diff --git a/src/dlt/gateway/README.md b/src/dlt/gateway/README.md
index 361de07c6a35fb7951f063a9aa6fc3fb28d3ba0d..2cf6cfeb1682ade5a77f53fe13c96daed6dc33fd 100644
--- a/src/dlt/gateway/README.md
+++ b/src/dlt/gateway/README.md
@@ -13,7 +13,7 @@
  duplication of the object or source code - either totally or in
  part - is strictly prohibited.
 
-          Copyright (c) 2021 NEC Laboratories Europe GmbH
+          Copyright (c) 2022 NEC Laboratories Europe GmbH
           All Rights Reserved.
 
  Authors: Konstantin Munichev <konstantin.munichev@neclab.eu>
@@ -42,7 +42,7 @@
 ## General information
 The DLT module is used to provide access to the underlying Fabric deployment. It allows clients
 to add, retrieve, modify and delete blockchain-backed data, essentially working as a key-value
-database. External clients should use REST API to communicate with this service, its detailed
+database. External clients should use the gRPC API to communicate with this service; there is a detailed
 description available below.
 
 ## Code structure
@@ -59,26 +59,76 @@ CRUD interface.
 Other files contain auxiliary code for `FabricConnector` which allows it to register/enroll
 users and to obtain smart contract instances.
 
-### HTTP package
-Contains server side HTTP handler. It accepts requests from the outside and performs the
-requested operation. For the detailed description see API description section.
+### gRPC package
+Contains the server-side gRPC handler. It accepts requests from the outside and performs the
+requested operation. For a more detailed description, see the Proto package section right below.
 
 ### Proto package
-The proto package contains `Config.proto` file which contains messages for REST API. The most
-important ones are `DltConfig` (it defines the whole DLT configuration) and `DltRecord` which
-represents data to store in the blockchain.
+The proto package contains the `dlt.proto` file, which defines the gRPC service `DltGatewayService` API and
+the messages it uses. There are three main RPCs: `RecordToDlt`, which creates/modifies/deletes data,
+`GetFromDlt`, which returns already written data, and `SubscribeToDlt`, which lets clients subscribe
+to future create/modify/delete events using the provided filters.
+Other proto files don't play any significant role and can be safely ignored by end users.
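+
+For orientation, below is a minimal Kotlin sketch of the three calls (not part of the service; it assumes the
+generated `dlt` stubs are on the classpath and a gateway listening on `localhost:50051`):
+
+```kotlin
+import dlt.DltGateway
+import dlt.DltGatewayServiceGrpcKt
+import io.grpc.ManagedChannelBuilder
+import kotlinx.coroutines.runBlocking
+
+fun main() = runBlocking {
+    // Channel/stub setup, mirroring the client example shipped with the gateway.
+    val channel = ManagedChannelBuilder.forAddress("localhost", 50051).usePlaintext().build()
+    val stub = DltGatewayServiceGrpcKt.DltGatewayServiceCoroutineStub(channel)
+
+    // RecordToDlt: create/modify/delete data (the operation is carried inside the record).
+    println(stub.recordToDlt(DltGateway.DltRecord.getDefaultInstance()))
+
+    // GetFromDlt: read back a previously written record by its id.
+    println(stub.getFromDlt(DltGateway.DltRecordId.getDefaultInstance()))
+
+    // SubscribeToDlt: stream future create/modify/delete events matching the filters.
+    stub.subscribeToDlt(DltGateway.DltRecordSubscription.getDefaultInstance()).collect { println(it) }
+}
+```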
 
 ### Client example
 This code is not necessary to the service, but it could be used to test the service. It contains
-a sample REST client which connects the service and perform all the CRUD operations. 
-
-## REST API description
-| Method | URL | Input | Response code | Output |
-| --- | ----------- | --- | --- | --- |
-| POST | /dlt/configure | Configuration object | 201 or 400 | Status value | 
-| GET | /dlt/configure | - | 200 or 404 | Configuration object |
-| POST | /dlt/record | Record object | 200, 201, 400 or 404 | Status value |
-| GET | /dlt/record | Record id | 200 or 404 | Record object |
-
-Record and configuration object are defined in `proto` package.
-
+a sample gRPC client which connects to the service and performs all the CRUD operations.
+
+# Fabric deployment notes
+
+## General notes
+The current Fabric deployment uses the Fabric test network with some additional helper scripts on top of it.
+To start the network, just run `raft.sh` from the `blockchain/scripts` directory. Use `stop.sh`
+when you need to stop the network.
+
+## Server start preparations
+To run the server, it's necessary to copy the certificate file
+`fabric-samples/test-network/organizations/peerOrganizations/org1.example.com/ca/ca.org1.example.com-cert.pem`
+to the config folder (replacing the existing one). It's also necessary to copy the `scripts/connection-org1.json`
+file (again, replacing the old one). After copying, it must be edited: first, all `localhost` occurrences
+should be replaced with `teraflow.nlehd.de`; second, the `channel` section at the end of the file should be removed.
+This has to be repeated after every restart of the Fabric network.
+
+## Fabric configuration
+Even though a test network is easy to deploy and use, it's better to perform a custom configuration
+for a production deployment. In practice, every participating organization will likely prefer to have
+its own Peer/Orderer/CA instances to avoid depending on any other participant. This leads
+not only to better privacy/availability/security in general but also, as a side effect, to a more complicated
+deployment process. Here we provide a very brief description of the most important points.
+
+### Organizations
+An organization represents a network participant, which can be an individual, a large corporation or any other
+entity. Each organization has its own CAs, orderers and peers. The recommendation here is to create an
+organization entity for every independent participant and then decide how many CAs/peers/orderers
+each organization needs and which channels it should have access to, based on the project's goals.
+
+### Channels
+Each channel represents an independent ledger with its own genesis block. Each transaction is executed
+on a specific channel, and it's possible to define which organization has access to a given channel.
+As a result, channels are a powerful privacy mechanism which allows limiting access to private
+data between organizations.
+
+### Certificate authorities, peers and orderers
+Certificate authorities (CA) are used to generate crypto materials for each organization. Two types of CA
+exist: one is used to generate the certificates of the admin, the MSP and non-admin users.
+The other type of CA is used to generate TLS certificates. As a result, it's preferable to have at least two
+CAs for every organization.
+
+Peers are entities which host ledgers and smart contracts. They communicate with applications and orderers,
+receiving chaincode invocations (proposals), invoking chaincode, updating the ledger when necessary and
+returning the result of execution. Peers can handle one or many ledgers, depending on the configuration. How
+many peers a particular deployment needs is very use-case specific.
+
+Orderers are used to execute consensus in a distributed network, making sure that every channel participant
+has the same blocks with the same data. The default consensus algorithm is Raft, which provides only crash
+fault tolerance.
+
+### Conclusion
+As you can see, the configuration procedure for Fabric is fairly tricky and involves quite a lot of entities.
+In the real world it will very likely involve participants from multiple organizations, each of them performing
+its own part of the configuration.
+
+For further reading, it's recommended to start with the
+[official deployment guide](https://hyperledger-fabric.readthedocs.io/en/release-2.2/deployment_guide_overview.html).
+It contains a high-level overview of the deployment process as well as links to detailed
+CA/Peer/Orderer configuration descriptions.
\ No newline at end of file
diff --git a/src/dlt/gateway/build.gradle.kts b/src/dlt/gateway/build.gradle.kts
index 8eb0d53fa99ec972edd6ee03aafeb0d676f3d3c0..b65aff89e18077ffaff37ea732293f585ca7920d 100644
--- a/src/dlt/gateway/build.gradle.kts
+++ b/src/dlt/gateway/build.gradle.kts
@@ -39,16 +39,19 @@ import org.jetbrains.kotlin.gradle.tasks.KotlinCompile
 
 import com.google.protobuf.gradle.generateProtoTasks
 import com.google.protobuf.gradle.id
+import com.google.protobuf.gradle.plugins
 import com.google.protobuf.gradle.protobuf
 import com.google.protobuf.gradle.protoc
 
-ext["protobufVersion"] = "3.19.1"
+ext["grpcVersion"] = "1.47.0"
+ext["grpcKotlinVersion"] = "1.3.0" // CURRENT_GRPC_KOTLIN_VERSION
+ext["protobufVersion"] = "3.20.1"
 ext["ktorVersion"] = "1.6.5"
 
 plugins {
-    kotlin("jvm") version "1.5.31"
+    kotlin("jvm") version "1.6.21"
     kotlin("plugin.serialization") version "1.4.21"
-    id("com.google.protobuf") version "0.8.17"
+    id("com.google.protobuf") version "0.8.18"
     application
 }
 
@@ -56,22 +59,24 @@ group = "eu.neclab"
 version = "1.0-SNAPSHOT"
 
 repositories {
+    mavenLocal()
+    google()
     mavenCentral()
 }
 
 dependencies {
-    testImplementation("org.jetbrains.kotlin:kotlin-test:1.5.31")
-    implementation("org.hyperledger.fabric:fabric-gateway-java:2.2.2")
-    api("com.google.protobuf:protobuf-kotlin:${rootProject.ext["protobufVersion"]}")
-    implementation("io.ktor:ktor-server-core:${rootProject.ext["ktorVersion"]}")
-    implementation("io.ktor:ktor-server-netty:${rootProject.ext["ktorVersion"]}")
-    implementation("io.ktor:ktor-serialization:${rootProject.ext["ktorVersion"]}")
-    implementation("io.ktor:ktor-client-serialization:${rootProject.ext["ktorVersion"]}")
-    implementation("io.ktor:ktor-client-core:${rootProject.ext["ktorVersion"]}")
-    implementation("io.ktor:ktor-client-cio:${rootProject.ext["ktorVersion"]}")
-    implementation("ch.qos.logback:logback-classic:1.2.5")
+    implementation(kotlin("stdlib-jdk8"))
+    testImplementation("org.jetbrains.kotlin:kotlin-test:1.6.21")
+    implementation("javax.annotation:javax.annotation-api:1.3.2")
+    implementation("io.grpc:grpc-kotlin-stub:1.3.0")
+    implementation("io.grpc:grpc-protobuf:1.47.0")
+    implementation("com.google.protobuf:protobuf-kotlin:3.21.1")
+    implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:1.6.3")
+    implementation("org.hyperledger.fabric:fabric-gateway-java:2.2.5")
+    implementation("ch.qos.logback:logback-classic:1.2.11")
     implementation("org.jetbrains.kotlinx:kotlinx-serialization-json:1.3.1")
     implementation("org.jetbrains.kotlinx:kotlinx-serialization-protobuf:1.3.1")
+    runtimeOnly("io.grpc:grpc-netty:${rootProject.ext["grpcVersion"]}")
 }
 
 tasks.test {
@@ -93,23 +98,50 @@ application {
     mainClass.set("MainKt")
 }
 
+task("runServer", JavaExec::class) {
+    main = "grpc.FabricServerKt"
+    classpath = sourceSets["main"].runtimeClasspath
+}
+
+
 sourceSets {
     main {
         proto {
+            srcDir("proto")
             srcDir("src/main/kotlin/proto")
         }
     }
 }
 
+sourceSets {
+    val main by getting { }
+    main.java.srcDirs("build/generated/source/proto/main/grpc")
+    main.java.srcDirs("build/generated/source/proto/main/grpckt")
+    main.java.srcDirs("build/generated/source/proto/main/java")
+    main.java.srcDirs("build/generated/source/proto/main/kotlin")
+}
+
 protobuf {
     protoc {
         artifact = "com.google.protobuf:protoc:${rootProject.ext["protobufVersion"]}"
     }
+    plugins {
+        id("grpc") {
+            artifact = "io.grpc:protoc-gen-grpc-java:${rootProject.ext["grpcVersion"]}"
+        }
+        id("grpckt") {
+            artifact = "io.grpc:protoc-gen-grpc-kotlin:${rootProject.ext["grpcKotlinVersion"]}:jdk8@jar"
+        }
+    }
     generateProtoTasks {
         all().forEach {
+            it.plugins {
+                id("grpc")
+                id("grpckt")
+            }
             it.builtins {
                 id("kotlin")
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/dlt/gateway/config/ca.org1.example.com-cert.pem b/src/dlt/gateway/config/ca.org1.example.com-cert.pem
index 5287a0f2bada9649c0d2ddd8eb8a71b2dac10df7..d7fdf63cc3f745d13edc8394bca67a1b41011ed2 100644
--- a/src/dlt/gateway/config/ca.org1.example.com-cert.pem
+++ b/src/dlt/gateway/config/ca.org1.example.com-cert.pem
@@ -1,14 +1,14 @@
 -----BEGIN CERTIFICATE-----
-MIICJjCCAc2gAwIBAgIUWZ4l32loO9+FM0FYw61y3dUF5a0wCgYIKoZIzj0EAwIw
+MIICJzCCAc2gAwIBAgIUb5gDMfVeVdQjFkK3uC8LtlogN+gwCgYIKoZIzj0EAwIw
 cDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQH
 EwZEdXJoYW0xGTAXBgNVBAoTEG9yZzEuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2Nh
-Lm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwNzA1MDk0NDAwWhcNMzcwNzAxMDk0NDAw
+Lm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwOTI3MDgzMDAwWhcNMzcwOTIzMDgzMDAw
 WjBwMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGggQ2Fyb2xpbmExDzANBgNV
 BAcTBkR1cmhhbTEZMBcGA1UEChMQb3JnMS5leGFtcGxlLmNvbTEcMBoGA1UEAxMT
-Y2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABNPg
-yfDxHr4ZmFp3HB19f27vfc1YTKBnznLqIFwVad2Y+eXfni8DnTRNGgwdkG9uIK2L
-4Y9mwlKG/mTNx629G4GjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG
-AQH/AgEBMB0GA1UdDgQWBBSZlT6qe+DAGpEBXyMxzidqCkQ4PjAKBggqhkjOPQQD
-AgNHADBEAiAIG5jwBGddB9CwocmjAzFv8+e7+0bvNSwjrG229QogTgIgbTNoC33P
-mbR5ChlkUAW2t41hTOCSMIwLAlvEwpeCnAk=
+Y2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABDC3
+spCTT3pjfFXxkX/SFuBgWRiceR8rSoCNQOnIPeNGZK8xl2Zr7VuY06gqy9c+ecSU
+PUWaXiCQxiLgZuS6TOWjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG
+AQH/AgEBMB0GA1UdDgQWBBRFWSc7GZqcJJyJjXSEspzgAYInGzAKBggqhkjOPQQD
+AgNIADBFAiEAodqc+adkiMuU6iv1IF8uJ/nMQbvMGoP3pb2827QzDosCICOw6W+y
+uH03H3RO6KhOcS1ZzPjspyjrcC+dwzYX4DpW
 -----END CERTIFICATE-----
diff --git a/src/dlt/gateway/config/connection-org1.json b/src/dlt/gateway/config/connection-org1.json
index 320a20806650edaea9de45d0e208bc2b8dea12f0..6f6f3f08d65c495bb57551f6d0bfac38c9a2f8cc 100644
--- a/src/dlt/gateway/config/connection-org1.json
+++ b/src/dlt/gateway/config/connection-org1.json
@@ -24,9 +24,9 @@
     },
     "peers": {
         "peer0.org1.example.com": {
-            "url": "grpcs://s2:7051",
+            "url": "grpcs://teraflow.nlehd.de:7051",
             "tlsCACerts": {
-                "pem": "-----BEGIN CERTIFICATE-----\nMIICJjCCAc2gAwIBAgIUWZ4l32loO9+FM0FYw61y3dUF5a0wCgYIKoZIzj0EAwIw\ncDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQH\nEwZEdXJoYW0xGTAXBgNVBAoTEG9yZzEuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2Nh\nLm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwNzA1MDk0NDAwWhcNMzcwNzAxMDk0NDAw\nWjBwMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGggQ2Fyb2xpbmExDzANBgNV\nBAcTBkR1cmhhbTEZMBcGA1UEChMQb3JnMS5leGFtcGxlLmNvbTEcMBoGA1UEAxMT\nY2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABNPg\nyfDxHr4ZmFp3HB19f27vfc1YTKBnznLqIFwVad2Y+eXfni8DnTRNGgwdkG9uIK2L\n4Y9mwlKG/mTNx629G4GjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG\nAQH/AgEBMB0GA1UdDgQWBBSZlT6qe+DAGpEBXyMxzidqCkQ4PjAKBggqhkjOPQQD\nAgNHADBEAiAIG5jwBGddB9CwocmjAzFv8+e7+0bvNSwjrG229QogTgIgbTNoC33P\nmbR5ChlkUAW2t41hTOCSMIwLAlvEwpeCnAk=\n-----END CERTIFICATE-----\n"
+                "pem": "-----BEGIN CERTIFICATE-----\nMIICJzCCAc2gAwIBAgIUb5gDMfVeVdQjFkK3uC8LtlogN+gwCgYIKoZIzj0EAwIw\ncDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQH\nEwZEdXJoYW0xGTAXBgNVBAoTEG9yZzEuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2Nh\nLm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwOTI3MDgzMDAwWhcNMzcwOTIzMDgzMDAw\nWjBwMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGggQ2Fyb2xpbmExDzANBgNV\nBAcTBkR1cmhhbTEZMBcGA1UEChMQb3JnMS5leGFtcGxlLmNvbTEcMBoGA1UEAxMT\nY2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABDC3\nspCTT3pjfFXxkX/SFuBgWRiceR8rSoCNQOnIPeNGZK8xl2Zr7VuY06gqy9c+ecSU\nPUWaXiCQxiLgZuS6TOWjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG\nAQH/AgEBMB0GA1UdDgQWBBRFWSc7GZqcJJyJjXSEspzgAYInGzAKBggqhkjOPQQD\nAgNIADBFAiEAodqc+adkiMuU6iv1IF8uJ/nMQbvMGoP3pb2827QzDosCICOw6W+y\nuH03H3RO6KhOcS1ZzPjspyjrcC+dwzYX4DpW\n-----END CERTIFICATE-----\n"
             },
             "grpcOptions": {
                 "ssl-target-name-override": "peer0.org1.example.com",
@@ -34,9 +34,9 @@
             }
         },
         "peer0.org2.example.com": {
-            "url": "grpcs://s2:9051",
+            "url": "grpcs://teraflow.nlehd.de:9051",
             "tlsCACerts": {
-                "pem": "-----BEGIN CERTIFICATE-----\nMIICHzCCAcWgAwIBAgIUejv57h6dJkVIM2R1YnlqykkvG7gwCgYIKoZIzj0EAwIw\nbDELMAkGA1UEBhMCVUsxEjAQBgNVBAgTCUhhbXBzaGlyZTEQMA4GA1UEBxMHSHVy\nc2xleTEZMBcGA1UEChMQb3JnMi5leGFtcGxlLmNvbTEcMBoGA1UEAxMTY2Eub3Jn\nMi5leGFtcGxlLmNvbTAeFw0yMjA3MDUwOTQ0MDBaFw0zNzA3MDEwOTQ0MDBaMGwx\nCzAJBgNVBAYTAlVLMRIwEAYDVQQIEwlIYW1wc2hpcmUxEDAOBgNVBAcTB0h1cnNs\nZXkxGTAXBgNVBAoTEG9yZzIuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2NhLm9yZzIu\nZXhhbXBsZS5jb20wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASFZqoisCIgZyMM\n8e0YBA+jxH/+Fc4Y4OkEl5uGRXGl9s0OemCdvhlX9K+esX2DVk1st1PMfTEj/six\n9XPpVqzNo0UwQzAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBATAd\nBgNVHQ4EFgQUPEyzGBlZEjguoJB16wAmoH2bAh8wCgYIKoZIzj0EAwIDSAAwRQIh\nAL6DAWgrqRtbYoQ0oYAr/2vze0JtQcXoqiQKlyvYkUBbAiB/uSHBk3NwjzI8t8iW\nzQzr5eNy5JwOO0SWwPEv4Ev9iQ==\n-----END CERTIFICATE-----\n"
+                "pem": "-----BEGIN CERTIFICATE-----\nMIICHjCCAcWgAwIBAgIUL48scgv9ItATkBjSNhzYDjLUDsAwCgYIKoZIzj0EAwIw\nbDELMAkGA1UEBhMCVUsxEjAQBgNVBAgTCUhhbXBzaGlyZTEQMA4GA1UEBxMHSHVy\nc2xleTEZMBcGA1UEChMQb3JnMi5leGFtcGxlLmNvbTEcMBoGA1UEAxMTY2Eub3Jn\nMi5leGFtcGxlLmNvbTAeFw0yMjA5MjcwODMwMDBaFw0zNzA5MjMwODMwMDBaMGwx\nCzAJBgNVBAYTAlVLMRIwEAYDVQQIEwlIYW1wc2hpcmUxEDAOBgNVBAcTB0h1cnNs\nZXkxGTAXBgNVBAoTEG9yZzIuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2NhLm9yZzIu\nZXhhbXBsZS5jb20wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ5qz8FfrEQ5S08\nr/avPyTrF2grXj5L4DnbvF4YEZ5Usnbm8Svovu7PO8uiVcwT5vrt6ssOdpBFZYu3\nNndpojnYo0UwQzAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBATAd\nBgNVHQ4EFgQUYcp7axYV9AaIptYQqhiCL0VDmXQwCgYIKoZIzj0EAwIDRwAwRAIg\nWT1V8/6flUPNcBkmbtEEKf83k7+6sR9k1a2wtVeJFnQCIE0ZSIL3k0dKQydQBpiz\nPcZZUULvQivcMlIsw5+mjIGc\n-----END CERTIFICATE-----\n"
             },
             "grpcOptions": {
                 "ssl-target-name-override": "peer0.org2.example.com",
@@ -46,11 +46,11 @@
     },
     "certificateAuthorities": {
         "ca.org1.example.com": {
-            "url": "https://s2:7054",
+            "url": "https://teraflow.nlehd.de:7054",
             "caName": "ca-org1",
             "tlsCACerts": {
                 "pem": [
-                    "-----BEGIN CERTIFICATE-----\nMIICJjCCAc2gAwIBAgIUWZ4l32loO9+FM0FYw61y3dUF5a0wCgYIKoZIzj0EAwIw\ncDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQH\nEwZEdXJoYW0xGTAXBgNVBAoTEG9yZzEuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2Nh\nLm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwNzA1MDk0NDAwWhcNMzcwNzAxMDk0NDAw\nWjBwMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGggQ2Fyb2xpbmExDzANBgNV\nBAcTBkR1cmhhbTEZMBcGA1UEChMQb3JnMS5leGFtcGxlLmNvbTEcMBoGA1UEAxMT\nY2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABNPg\nyfDxHr4ZmFp3HB19f27vfc1YTKBnznLqIFwVad2Y+eXfni8DnTRNGgwdkG9uIK2L\n4Y9mwlKG/mTNx629G4GjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG\nAQH/AgEBMB0GA1UdDgQWBBSZlT6qe+DAGpEBXyMxzidqCkQ4PjAKBggqhkjOPQQD\nAgNHADBEAiAIG5jwBGddB9CwocmjAzFv8+e7+0bvNSwjrG229QogTgIgbTNoC33P\nmbR5ChlkUAW2t41hTOCSMIwLAlvEwpeCnAk=\n-----END CERTIFICATE-----\n"
+                    "-----BEGIN CERTIFICATE-----\nMIICJzCCAc2gAwIBAgIUb5gDMfVeVdQjFkK3uC8LtlogN+gwCgYIKoZIzj0EAwIw\ncDELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQH\nEwZEdXJoYW0xGTAXBgNVBAoTEG9yZzEuZXhhbXBsZS5jb20xHDAaBgNVBAMTE2Nh\nLm9yZzEuZXhhbXBsZS5jb20wHhcNMjIwOTI3MDgzMDAwWhcNMzcwOTIzMDgzMDAw\nWjBwMQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGggQ2Fyb2xpbmExDzANBgNV\nBAcTBkR1cmhhbTEZMBcGA1UEChMQb3JnMS5leGFtcGxlLmNvbTEcMBoGA1UEAxMT\nY2Eub3JnMS5leGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABDC3\nspCTT3pjfFXxkX/SFuBgWRiceR8rSoCNQOnIPeNGZK8xl2Zr7VuY06gqy9c+ecSU\nPUWaXiCQxiLgZuS6TOWjRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG\nAQH/AgEBMB0GA1UdDgQWBBRFWSc7GZqcJJyJjXSEspzgAYInGzAKBggqhkjOPQQD\nAgNIADBFAiEAodqc+adkiMuU6iv1IF8uJ/nMQbvMGoP3pb2827QzDosCICOw6W+y\nuH03H3RO6KhOcS1ZzPjspyjrcC+dwzYX4DpW\n-----END CERTIFICATE-----\n"
                 ]
             },
             "httpOptions": {
@@ -60,9 +60,9 @@
     },
     "orderers": {
         "orderer0.example.com": {
-            "url": "grpcs://s2:7050",
+            "url": "grpcs://teraflow.nlehd.de:7050",
             "tlsCACerts": {
-                "pem": "-----BEGIN CERTIFICATE-----\nMIICCjCCAbGgAwIBAgIURV0KgZTOagIAIU7wRcSg/mKl5RUwCgYIKoZIzj0EAwIw\nYjELMAkGA1UEBhMCVVMxETAPBgNVBAgTCE5ldyBZb3JrMREwDwYDVQQHEwhOZXcg\nWW9yazEUMBIGA1UEChMLZXhhbXBsZS5jb20xFzAVBgNVBAMTDmNhLmV4YW1wbGUu\nY29tMB4XDTIyMDcwNTA5NDQwMFoXDTM3MDcwMTA5NDQwMFowYjELMAkGA1UEBhMC\nVVMxETAPBgNVBAgTCE5ldyBZb3JrMREwDwYDVQQHEwhOZXcgWW9yazEUMBIGA1UE\nChMLZXhhbXBsZS5jb20xFzAVBgNVBAMTDmNhLmV4YW1wbGUuY29tMFkwEwYHKoZI\nzj0CAQYIKoZIzj0DAQcDQgAEWrOugtJgVLAKZRw9jaC15RUbVuTm0ZmsqNyiQrKQ\nYawLE6fs+QIU7WQ25fxlYtmGB2S8nofGCDuwaoTevW0GoaNFMEMwDgYDVR0PAQH/\nBAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFLKBzGXaQg2Irr57\npjoFYZ9F1NoNMAoGCCqGSM49BAMCA0cAMEQCIB1YdgOEsATw2GeaFmq6nqWg0JDT\np456JB/reFmnPWdJAiBPo5H9sMh+MpP4R5ue7nuwYK7SEJ1DOJqWMlPuNhVgtA==\n-----END CERTIFICATE-----\n"
+                "pem": "-----BEGIN CERTIFICATE-----\nMIICCzCCAbGgAwIBAgIUdZQo3q4OqyxIkidmAV4QkewCylIwCgYIKoZIzj0EAwIw\nYjELMAkGA1UEBhMCVVMxETAPBgNVBAgTCE5ldyBZb3JrMREwDwYDVQQHEwhOZXcg\nWW9yazEUMBIGA1UEChMLZXhhbXBsZS5jb20xFzAVBgNVBAMTDmNhLmV4YW1wbGUu\nY29tMB4XDTIyMDkyNzA4MzAwMFoXDTM3MDkyMzA4MzAwMFowYjELMAkGA1UEBhMC\nVVMxETAPBgNVBAgTCE5ldyBZb3JrMREwDwYDVQQHEwhOZXcgWW9yazEUMBIGA1UE\nChMLZXhhbXBsZS5jb20xFzAVBgNVBAMTDmNhLmV4YW1wbGUuY29tMFkwEwYHKoZI\nzj0CAQYIKoZIzj0DAQcDQgAERR0UzsHSFoyON+9Noxmk1IhnTvSdLWGgEpEwrqVr\n5DwitkeJwRWq134JBTmXuZzsUG87oN6Hr94XAEe4j9Zq8qNFMEMwDgYDVR0PAQH/\nBAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFN8XsELp/X0akrlJ\nY3/BWo2jZS3cMAoGCCqGSM49BAMCA0gAMEUCIQCZYYXW/0h3Kq4BmROpOHfrondg\nopf5LndeujYlH3i8tQIgCtpTQiDXZd+IAUduRmn7a46CwJSbjYbXFVX5vumIbE4=\n-----END CERTIFICATE-----\n"
             },
             "grpcOptions": {
                 "ssl-target-name-override": "orderer0.example.com",
diff --git a/src/dlt/gateway/settings.gradle.kts b/src/dlt/gateway/settings.gradle.kts
index 0ebdd07b29682c72c65695e4f5655437ed11d74d..67683a7440a06dd490a07dce3e3858ec8242f1ea 100644
--- a/src/dlt/gateway/settings.gradle.kts
+++ b/src/dlt/gateway/settings.gradle.kts
@@ -1,3 +1,3 @@
 
-rootProject.name = "dlt"
+rootProject.name = "gateway"
 
diff --git a/src/dlt/gateway/src/main/kotlin/Main.kt b/src/dlt/gateway/src/main/kotlin/Main.kt
index 68a820ee9feb622a5f3d8429dd56b905b0e1a1b4..c57c9e980853e84d3c10551588dc8d94c14ad40e 100644
--- a/src/dlt/gateway/src/main/kotlin/Main.kt
+++ b/src/dlt/gateway/src/main/kotlin/Main.kt
@@ -12,7 +12,7 @@
 // duplication of the object or source code - either totally or in
 // part - is strictly prohibited.
 //
-//          Copyright (c) 2021 NEC Laboratories Europe GmbH
+//          Copyright (c) 2022 NEC Laboratories Europe GmbH
 //          All Rights Reserved.
 //
 // Authors: Konstantin Munichev <konstantin.munichev@neclab.eu>
@@ -35,107 +35,127 @@
 //
 // THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
 
-import io.ktor.client.*
-import io.ktor.client.engine.cio.*
-import io.ktor.client.features.*
-import io.ktor.client.request.*
-import io.ktor.utils.io.jvm.javaio.*
-import kotlinx.serialization.ExperimentalSerializationApi
-import proto.Config
-import proto.Config.DltConfig
-
-@OptIn(ExperimentalSerializationApi::class)
-suspend fun main(args: Array<String>) {
-    // TODO: default configuration file
-    val cfg = DltConfig.newBuilder().setWallet("wallet").setConnectionFile("config/connection-org1.json")
-        .setUser("appUser")
-        .setChannel("dlt")
-        .setContract("basic").setCaCertFile("config/ca.org1.example.com-cert.pem").setCaUrl("https://s2:7054")
-        .setCaAdmin("admin").setCaAdminSecret("adminpw").setMsp("Org1MSP").setAffiliation("org1.department1")
-        .build()
-    val cfgBytes = cfg.toByteArray()
+import context.ContextOuterClass
+import io.grpc.ManagedChannel
+import io.grpc.ManagedChannelBuilder
+import kotlinx.coroutines.GlobalScope
+import kotlinx.coroutines.launch
+import kotlinx.coroutines.runBlocking
+import dlt.DltGateway
+import dlt.DltGatewayServiceGrpcKt
+import java.io.Closeable
+import java.util.*
+import java.util.concurrent.TimeUnit
 
-    val client = HttpClient(CIO) {
-        HttpResponseValidator {
-            validateResponse { response ->
-                println(response.status)
-            }
-        }
-    }
+class DltServiceClient(private val channel: ManagedChannel) : Closeable {
+    private val stub: DltGatewayServiceGrpcKt.DltGatewayServiceCoroutineStub =
+        DltGatewayServiceGrpcKt.DltGatewayServiceCoroutineStub(channel)
 
-    try {
-        client.post<ByteArray>("http://localhost:8080/dlt/configure") {
-            body = cfgBytes
-        }
-    } catch (e: ClientRequestException) {
-        println(e.response.status)
-        println(String(e.response.content.toInputStream().readAllBytes()))
+    suspend fun putData(data: DltGateway.DltRecord) {
+        println("Sending record ${data.recordId}...")
+        val response = stub.recordToDlt(data)
+        println("Response: ${response.recordId}")
     }
 
-    try {
-        val config = client.get<ByteArray>("http://localhost:8080/dlt/configure")
-        println(DltConfig.parseFrom(config))
-    } catch (e: ClientRequestException) {
-        println(e.response.status)
-        println(String(e.response.content.toInputStream().readAllBytes()))
+    suspend fun getData(id: DltGateway.DltRecordId) {
+        println("Requesting record $id...")
+        val response = stub.getFromDlt(id)
+        println("Got data: $response")
     }
 
-    val uuid = "41f4d2e2-f4ef-4c81-872a-c32f2d26b2ca"
-    try {
-        val record = client.get<ByteArray>("http://localhost:8080/dlt/record") {
-            body = uuid
+    fun subscribe(filter: DltGateway.DltRecordSubscription) {
+        val subscription = stub.subscribeToDlt(filter)
+        GlobalScope.launch {
+            subscription.collect {
+                println("Got subscription event")
+                println(it)
+            }
         }
-        println(Config.DltRecord.parseFrom(record))
-    } catch (e: ClientRequestException) {
-        println(e.response.status)
-        println(String(e.response.content.toInputStream().readAllBytes()))
     }
 
-    val id = Config.DltRecordId.newBuilder().setUuid(uuid).build()
-    val record = Config.DltRecord.newBuilder().setId(id).setOperation(Config.DltRecordOperation.ADD)
-        .setType(Config.DltRecordType.DEVICE).setJson("{}").build()
-    try {
-        val result = client.post<ByteArray>("http://localhost:8080/dlt/record") {
-            body = record.toByteArray()
-        }
-        println(String(result))
-        val requestedRecord = client.get<ByteArray>("http://localhost:8080/dlt/record") {
-            body = uuid
-        }
-        println(Config.DltRecord.parseFrom(requestedRecord))
-    } catch (e: ClientRequestException) {
-        println(e.response.status)
-        println(String(e.response.content.toInputStream().readAllBytes()))
+    override fun close() {
+        channel.shutdown().awaitTermination(5, TimeUnit.SECONDS)
     }
+}
 
-    try {
-        val newRecord = Config.DltRecord.newBuilder().setId(id).setOperation(Config.DltRecordOperation.UPDATE)
-            .setType(Config.DltRecordType.UNKNOWN).setJson("{}").build()
-        val result = client.post<ByteArray>("http://localhost:8080/dlt/record") {
-            body = newRecord.toByteArray()
-        }
-        println(String(result))
-        val requestedRecord = client.get<ByteArray>("http://localhost:8080/dlt/record") {
-            body = uuid
-        }
-        println(Config.DltRecord.parseFrom(requestedRecord))
-    } catch (e: ClientRequestException) {
-        println(e.response.status)
-        println(String(e.response.content.toInputStream().readAllBytes()))
-    }
 
+fun main() = runBlocking {
+    val port = 50051
+    val channel = ManagedChannelBuilder.forAddress("localhost", port).usePlaintext().build()
+
+    val client = DltServiceClient(channel)
+
+    val domainUuid = UUID.randomUUID().toString()
+    val recordUuid = UUID.randomUUID().toString()
+    println("New domain uuid $domainUuid")
+    println("New record uuid $recordUuid")
+
+    val id = DltGateway.DltRecordId.newBuilder()
+        .setDomainUuid(
+            ContextOuterClass.Uuid.newBuilder()
+                .setUuid(domainUuid)
+        )
+        .setRecordUuid(
+            ContextOuterClass.Uuid.newBuilder()
+                .setUuid(recordUuid)
+        )
+        .setType(DltGateway.DltRecordTypeEnum.DLTRECORDTYPE_SERVICE)
+        .build()
+
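+    // Subscribe to ADD/UPDATE/DELETE events for CONTEXT, LINK and SERVICE records before writing anything.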
+    val subscription = DltGateway.DltRecordSubscription.newBuilder()
+        .addType(DltGateway.DltRecordTypeEnum.DLTRECORDTYPE_CONTEXT)
+        .addType(DltGateway.DltRecordTypeEnum.DLTRECORDTYPE_LINK)
+        .addType(DltGateway.DltRecordTypeEnum.DLTRECORDTYPE_SERVICE)
+        .addOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_ADD)
+        .addOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE)
+        .addOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_DELETE)
+        .build()
+
+    client.subscribe(subscription)
+
+    Thread.sleep(5000)
+
+    val data = DltGateway.DltRecord.newBuilder()
+        .setRecordId(id)
+        .setOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_ADD)
+        // dataJson carries only the record payload; the id and operation are set on the builder above.
+        .setDataJson("{\"device_config\": {\"config_rules\": []}, \"device_drivers\": []," +
+                "\"device_endpoints\": [], \"device_id\": {\"device_uuid\": {\"uuid\": \"dev-12345\"}}," +
+                "\"device_operational_status\": \"DEVICEOPERATIONALSTATUS_ENABLED\"," +
+                "\"device_type\": \"packet-router\"}")
+        .build()
+
+    println("sending new record")
+    client.putData(data)
+    client.getData(id)
+
+    Thread.sleep(5000)
+
+    val updateData = DltGateway.DltRecord.newBuilder()
+        .setRecordId(id)
+        .setOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE)
+        .setDataJson("{\"name\": \"test\"}")
+        .build()
+
+    println("updating record")
+    client.putData(updateData)
+    client.getData(id)
+
+    Thread.sleep(5000)
+
+    val removeData = DltGateway.DltRecord.newBuilder()
+        .setRecordId(id)
+        .setOperation(DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_DELETE)
+        .setDataJson("{\"name\": \"test\"}")
+        .build()
+
+    println("removing record")
+    client.putData(removeData)
     try {
-        val newRecord = Config.DltRecord.newBuilder().setId(id).setOperation(Config.DltRecordOperation.DISABLE).build()
-        val result = client.post<ByteArray>("http://localhost:8080/dlt/record") {
-            body = newRecord.toByteArray()
-        }
-        println(String(result))
-        val requestedRecord = client.get<ByteArray>("http://localhost:8080/dlt/record") {
-            body = uuid
-        }
-        println(Config.DltRecord.parseFrom(requestedRecord))
-    } catch (e: ClientRequestException) {
-        println(e.response.status)
-        println(String(e.response.content.toInputStream().readAllBytes()))
+        client.getData(id)
+    } catch (e: Exception) {
+        println(e.toString())
     }
-}
\ No newline at end of file
+    Thread.sleep(5000)
+}
diff --git a/src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt b/src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt
index 245bd4828776837802a1303787d5cfc34a5bffbc..00ec40d57dcd8bc4da18f30a6bed6f1d2a032b48 100644
--- a/src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt
+++ b/src/dlt/gateway/src/main/kotlin/fabric/ConnectGateway.kt
@@ -12,7 +12,7 @@
 // duplication of the object or source code - either totally or in
 // part - is strictly prohibited.
 //
-//          Copyright (c) 2021 NEC Laboratories Europe GmbH
+//          Copyright (c) 2022 NEC Laboratories Europe GmbH
 //          All Rights Reserved.
 //
 // Authors: Konstantin Munichev <konstantin.munichev@neclab.eu>
diff --git a/src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt b/src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt
index d7c163954ec4f1b63b5e646e714d493050e114c0..af6592be93c86e316a64cd23edd46bbbdc240cfd 100644
--- a/src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt
+++ b/src/dlt/gateway/src/main/kotlin/fabric/FabricConnector.kt
@@ -12,7 +12,7 @@
 // duplication of the object or source code - either totally or in
 // part - is strictly prohibited.
 //
-//          Copyright (c) 2021 NEC Laboratories Europe GmbH
+//          Copyright (c) 2022 NEC Laboratories Europe GmbH
 //          All Rights Reserved.
 //
 // Authors: Konstantin Munichev <konstantin.munichev@neclab.eu>
@@ -37,6 +37,11 @@
 
 package fabric
 
+import context.ContextOuterClass
+import dlt.DltGateway.DltRecord
+import dlt.DltGateway.DltRecordEvent
+import kotlinx.coroutines.channels.Channel
+import kotlinx.coroutines.runBlocking
 import org.hyperledger.fabric.gateway.Contract
 import org.hyperledger.fabric.gateway.ContractEvent
 import org.hyperledger.fabric.gateway.Wallet
@@ -53,6 +58,11 @@ class FabricConnector(val config: Config.DltConfig) {
     private val wallet: Wallet
     private val contract: Contract
 
+    private val channels: MutableList<Channel<DltRecordEvent>> = mutableListOf()
+
+    private val encoder: Base64.Encoder = Base64.getEncoder()
+    private val decoder: Base64.Decoder = Base64.getDecoder()
+
     init {
         // Create a CA client for interacting with the CA.
         val props = Properties()
@@ -65,7 +75,42 @@ class FabricConnector(val config: Config.DltConfig) {
         // Create a wallet for managing identities
         wallet = Wallets.newFileSystemWallet(Paths.get(config.wallet))
         contract = connect()
-        subscribeForEvents()
+
+        fabricSubscribe()
+    }
+
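+    // fabricSubscribe(): listen for chaincode events, decode the base64-encoded DltRecord payload, map the
+    // event name (Add/Update/Remove) to a context EventTypeEnum, and fan the event out to all subscriber channels.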
+    private fun fabricSubscribe() {
+        val consumer = Consumer { event: ContractEvent? ->
+            run {
+                println("new event detected")
+                val record = DltRecord.parseFrom(decoder.decode(event?.payload?.get()))
+                println(record.recordId.recordUuid)
+                val eventType: ContextOuterClass.EventTypeEnum = when (event?.name) {
+                    "Add" -> ContextOuterClass.EventTypeEnum.EVENTTYPE_CREATE
+                    "Update" -> ContextOuterClass.EventTypeEnum.EVENTTYPE_UPDATE
+                    "Remove" -> ContextOuterClass.EventTypeEnum.EVENTTYPE_REMOVE
+                    else -> ContextOuterClass.EventTypeEnum.EVENTTYPE_UNDEFINED
+                }
+                val pbEvent = DltRecordEvent.newBuilder()
+                    .setEvent(
+                        ContextOuterClass.Event.newBuilder()
+                            .setTimestamp(
+                                ContextOuterClass.Timestamp.newBuilder()
+                                    .setTimestamp(System.currentTimeMillis().toDouble())
+                            )
+                            .setEventType(eventType)
+                    )
+                    .setRecordId(record.recordId)
+                    .build()
+
+                runBlocking {
+                    channels.forEach {
+                        it.trySend(pbEvent)
+                    }
+                }
+            }
+        }
+        contract.addContractListener(consumer)
     }
 
     fun connect(): Contract {
@@ -74,44 +119,60 @@ class FabricConnector(val config: Config.DltConfig) {
         return getContract(config, wallet)
     }
 
-    fun putData(record: Config.DltRecord): String {
-        println(record.type.toString())
-        return String(
+    fun putData(record: DltRecord): String {
+        println(record.toString())
+
+        try {
             contract.submitTransaction(
                 "AddRecord",
-                record.id.uuid,
-                record.type.number.toString(),
-                record.json
+                record.recordId.recordUuid.uuid,
+                encoder.encodeToString(record.toByteArray())
             )
-        )
+        } catch (e: Exception) {
+            println(e.toString())
+            return e.toString()
+        }
+        return ""
     }
 
-    fun getData(uuid: String): Config.DltRecord {
-        val result = contract.evaluateTransaction("GetRecord", uuid)
-        return Config.DltRecord.parseFrom(result)
+    fun getData(uuid: String): DltRecord {
+        return try {
+            val result = contract.evaluateTransaction("GetRecord", uuid)
+            DltRecord.parseFrom(decoder.decode(result))
+        } catch (e: Exception) {
+            println(e.toString())
+            DltRecord.getDefaultInstance()
+        }
     }
 
-    fun updateData(record: Config.DltRecord): String {
-        return String(
+    fun updateData(record: DltRecord): String {
+        try {
             contract.submitTransaction(
                 "UpdateRecord",
-                record.id.uuid,
-                record.type.number.toString(),
-                record.json
+                record.recordId.recordUuid.uuid,
+                encoder.encodeToString(record.toByteArray())
             )
-        )
+        } catch (e: Exception) {
+            return e.toString()
+        }
+        return ""
     }
 
-    fun deleteData(uuid: String): String {
-        return String(contract.submitTransaction("DeactivateRecord", uuid))
+    fun deleteData(record: DltRecord): String {
+        try {
+            contract.submitTransaction(
+                "DeleteRecord",
+                record.recordId.recordUuid.uuid,
+            )
+        } catch (e: Exception) {
+            return e.toString()
+        }
+        return ""
     }
 
-    private fun subscribeForEvents() {
-        val consumer = Consumer {
-            event: ContractEvent? -> run {
-                println(event?.payload?.get()?.let { String(it) })
-            }
-        }
-        contract.addContractListener(consumer)
+    fun subscribeForEvents(): Channel<DltRecordEvent> {
+        val produceCh = Channel<DltRecordEvent>()
+        channels.add(produceCh)
+        return produceCh
     }
 }
\ No newline at end of file
diff --git a/src/dlt/gateway/src/main/kotlin/grpc/FabricServer.kt b/src/dlt/gateway/src/main/kotlin/grpc/FabricServer.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9b4e1f4dc38d80c22847ae213053119b301bdf3d
--- /dev/null
+++ b/src/dlt/gateway/src/main/kotlin/grpc/FabricServer.kt
@@ -0,0 +1,94 @@
+//     NEC Laboratories Europe GmbH
+//
+//     PROPRIETARY INFORMATION
+//
+// The software and its source code contain valuable trade secrets and
+// shall be maintained in confidence and treated as confidential
+// information. The software may only be used for evaluation and/or
+// testing purposes, unless otherwise explicitly stated in a written
+// agreement with NEC Laboratories Europe GmbH.
+//
+// Any unauthorized publication, transfer to third parties or
+// duplication of the object or source code - either totally or in
+// part - is strictly prohibited.
+//
+//          Copyright (c) 2022 NEC Laboratories Europe GmbH
+//          All Rights Reserved.
+//
+// Authors: Konstantin Munichev <konstantin.munichev@neclab.eu>
+//
+//
+// NEC Laboratories Europe GmbH DISCLAIMS ALL WARRANTIES, EITHER
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND THE
+// WARRANTY AGAINST LATENT DEFECTS, WITH RESPECT TO THE PROGRAM AND
+// THE ACCOMPANYING DOCUMENTATION.
+//
+// NO LIABILITIES FOR CONSEQUENTIAL DAMAGES: IN NO EVENT SHALL NEC
+// Laboratories Europe GmbH or ANY OF ITS SUBSIDIARIES BE LIABLE FOR
+// ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR
+// LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
+// INFORMATION, OR OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL,
+// INCIDENTAL, ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF
+// OR INABILITY TO USE THIS PROGRAM, EVEN IF NEC Laboratories Europe
+// GmbH HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+//
+// THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
+
+package grpc
+
+import fabric.FabricConnector
+import io.grpc.Server
+import io.grpc.ServerBuilder
+import proto.Config
+import kotlin.random.Random
+import kotlin.random.nextUInt
+
+class FabricServer(val port: Int) {
+    private val server: Server
+
+    init {
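+        // A random suffix keeps the wallet directory and CA user unique for each gateway instance.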
+        val id = Random.nextUInt()
+        val cfg = Config.DltConfig.newBuilder().setWallet("wallet$id").setConnectionFile("config/connection-org1.json")
+            .setUser("appUser$id")
+            .setChannel("dlt")
+            .setContract("basic").setCaCertFile("config/ca.org1.example.com-cert.pem").setCaUrl("https://teraflow.nlehd.de:7054")
+            .setCaAdmin("admin").setCaAdminSecret("adminpw").setMsp("Org1MSP").setAffiliation("org1.department1")
+            .build()
+        val connector = FabricConnector(cfg)
+
+        val dltService = DLTService(connector)
+        server = ServerBuilder
+            .forPort(port)
+            .addService(dltService)
+            .build()
+
+    }
+
+    fun start() {
+        server.start()
+        println("Server started, listening on $port")
+        Runtime.getRuntime().addShutdownHook(
+            Thread {
+                println("Shutting down...")
+                this@FabricServer.stop()
+                println("Server shut down")
+            }
+        )
+    }
+
+    private fun stop() {
+        server.shutdown()
+    }
+
+    fun blockUntilShutdown() {
+        server.awaitTermination()
+    }
+}
+
+fun main() {
+    val port = 50051
+    val server = FabricServer(port)
+    server.start()
+    server.blockUntilShutdown()
+}
diff --git a/src/dlt/gateway/src/main/kotlin/grpc/GrpcHandler.kt b/src/dlt/gateway/src/main/kotlin/grpc/GrpcHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..d39c24a1a87aacb32d828dcba8208b34312d7409
--- /dev/null
+++ b/src/dlt/gateway/src/main/kotlin/grpc/GrpcHandler.kt
@@ -0,0 +1,95 @@
+//     NEC Laboratories Europe GmbH
+//
+//     PROPRIETARY INFORMATION
+//
+// The software and its source code contain valuable trade secrets and
+// shall be maintained in confidence and treated as confidential
+// information. The software may only be used for evaluation and/or
+// testing purposes, unless otherwise explicitly stated in a written
+// agreement with NEC Laboratories Europe GmbH.
+//
+// Any unauthorized publication, transfer to third parties or
+// duplication of the object or source code - either totally or in
+// part - is strictly prohibited.
+//
+//          Copyright (c) 2022 NEC Laboratories Europe GmbH
+//          All Rights Reserved.
+//
+// Authors: Konstantin Munichev <konstantin.munichev@neclab.eu>
+//
+//
+// NEC Laboratories Europe GmbH DISCLAIMS ALL WARRANTIES, EITHER
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND THE
+// WARRANTY AGAINST LATENT DEFECTS, WITH RESPECT TO THE PROGRAM AND
+// THE ACCOMPANYING DOCUMENTATION.
+//
+// NO LIABILITIES FOR CONSEQUENTIAL DAMAGES: IN NO EVENT SHALL NEC
+// Laboratories Europe GmbH or ANY OF ITS SUBSIDIARIES BE LIABLE FOR
+// ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR
+// LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
+// INFORMATION, OR OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL,
+// INCIDENTAL, ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF
+// OR INABILITY TO USE THIS PROGRAM, EVEN IF NEC Laboratories Europe
+// GmbH HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+//
+// THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
+
+package grpc
+
+import fabric.FabricConnector
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.flow.consumeAsFlow
+import context.ContextOuterClass
+import dlt.DltGateway
+import dlt.DltGatewayServiceGrpcKt
+
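+// gRPC service implementation bridging the DltGatewayService RPCs to the underlying FabricConnector.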
+class DLTService(private val connector: FabricConnector) :
+    DltGatewayServiceGrpcKt.DltGatewayServiceCoroutineImplBase() {
+    override suspend fun recordToDlt(request: DltGateway.DltRecord): DltGateway.DltRecordStatus {
+        println("Incoming request ${request.recordId.recordUuid}")
+        val error = when (request.operation) {
+            DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_ADD -> {
+                println("Adding new record")
+                connector.putData(request)
+            }
+            DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE -> {
+                println("Updating record")
+                connector.updateData(request)
+            }
+            DltGateway.DltRecordOperationEnum.DLTRECORDOPERATION_DELETE -> {
+                println("Deleting record")
+                connector.deleteData(request)
+            }
+            else -> "Undefined or unknown operation"
+        }
+
+        val dltStatusEnum: DltGateway.DltRecordStatusEnum = if (error == "") {
+            DltGateway.DltRecordStatusEnum.DLTRECORDSTATUS_SUCCEEDED
+        } else {
+            DltGateway.DltRecordStatusEnum.DLTRECORDSTATUS_FAILED
+        }
+        return DltGateway.DltRecordStatus.newBuilder()
+            .setRecordId(request.recordId)
+            .setStatus(dltStatusEnum)
+            .setErrorMessage(error)
+            .build()
+    }
+
+    override suspend fun getFromDlt(request: DltGateway.DltRecordId): DltGateway.DltRecord {
+        return connector.getData(request.recordUuid.uuid)
+    }
+
+    override fun subscribeToDlt(request: DltGateway.DltRecordSubscription): Flow<DltGateway.DltRecordEvent> {
+        println("Subscription request: $request")
+        return connector.subscribeForEvents().consumeAsFlow()
+    }
+
+    override suspend fun getDltStatus(request: ContextOuterClass.TeraFlowController): DltGateway.DltPeerStatus {
+        return super.getDltStatus(request)
+    }
+
+    override suspend fun getDltPeers(request: ContextOuterClass.Empty): DltGateway.DltPeerStatusList {
+        return super.getDltPeers(request)
+    }
+}
\ No newline at end of file
diff --git a/src/dlt/gateway/src/main/kotlin/http/Server.kt b/src/dlt/gateway/src/main/kotlin/http/Server.kt
deleted file mode 100644
index 4e3400af36b32726096b177da230c8baa4bb3dab..0000000000000000000000000000000000000000
--- a/src/dlt/gateway/src/main/kotlin/http/Server.kt
+++ /dev/null
@@ -1,162 +0,0 @@
-//     NEC Laboratories Europe GmbH
-//
-//     PROPRIETARY INFORMATION
-//
-// The software and its source code contain valuable trade secrets and
-// shall be maintained in confidence and treated as confidential
-// information. The software may only be used for evaluation and/or
-// testing purposes, unless otherwise explicitly stated in a written
-// agreement with NEC Laboratories Europe GmbH.
-//
-// Any unauthorized publication, transfer to third parties or
-// duplication of the object or source code - either totally or in
-// part - is strictly prohibited.
-//
-//          Copyright (c) 2021 NEC Laboratories Europe GmbH
-//          All Rights Reserved.
-//
-// Authors: Konstantin Munichev <konstantin.munichev@neclab.eu>
-//
-//
-// NEC Laboratories Europe GmbH DISCLAIMS ALL WARRANTIES, EITHER
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND THE
-// WARRANTY AGAINST LATENT DEFECTS, WITH RESPECT TO THE PROGRAM AND
-// THE ACCOMPANYING DOCUMENTATION.
-//
-// NO LIABILITIES FOR CONSEQUENTIAL DAMAGES: IN NO EVENT SHALL NEC
-// Laboratories Europe GmbH or ANY OF ITS SUBSIDIARIES BE LIABLE FOR
-// ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR
-// LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
-// INFORMATION, OR OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL,
-// INCIDENTAL, ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF
-// OR INABILITY TO USE THIS PROGRAM, EVEN IF NEC Laboratories Europe
-// GmbH HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-//
-// THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
-
-package http
-
-import fabric.FabricConnector
-import io.ktor.application.*
-import io.ktor.features.*
-import io.ktor.http.*
-import io.ktor.request.*
-import io.ktor.response.*
-import io.ktor.routing.*
-import io.ktor.server.engine.*
-import io.ktor.server.netty.*
-import kotlinx.coroutines.Dispatchers
-import kotlinx.coroutines.sync.Mutex
-import kotlinx.coroutines.withContext
-import proto.Config
-import proto.Config.DltConfig
-import proto.Config.DltRecord
-
-class Server {
-    var connector: FabricConnector? = null
-    val port = 8080
-    val mutex = Mutex()
-}
-
-fun checkException(e: Exception): String {
-    if (e.message == null) return ""
-    return e.message!!
-}
-
-fun main() {
-    val server = Server()
-    embeddedServer(Netty, port = server.port) {
-        install(ContentNegotiation)
-        routing {
-            post("/dlt/configure") {
-                withContext(Dispatchers.IO) {
-                    try {
-                        val data = call.receiveStream()
-                        val config = DltConfig.parseFrom(data)
-                        println(config)
-                        server.mutex.lock()
-                        server.connector = FabricConnector(config)
-                        server.mutex.unlock()
-                        call.response.status(HttpStatusCode.Created)
-                    }
-                    // TODO: catch exceptions one by one
-                    catch (e: Exception) {
-                        call.respond(HttpStatusCode.BadRequest, checkException(e))
-                        e.printStackTrace()
-                    }
-                }
-            }
-            get("/dlt/configure") {
-                withContext(Dispatchers.IO) {
-                    server.mutex.lock()
-                    if (server.connector == null) {
-                        server.mutex.unlock()
-                        call.respond(HttpStatusCode.NotFound, "Not initialized")
-                    } else {
-                        val configBytes = server.connector!!.config.toByteArray()
-                        server.mutex.unlock()
-                        call.respond(HttpStatusCode.OK, configBytes)
-                    }
-                }
-            }
-            post("/dlt/record") {
-                withContext(Dispatchers.IO) {
-                    server.mutex.lock()
-                    try {
-                        if (server.connector == null) {
-                            call.respond(HttpStatusCode.NotFound, "Not initialized")
-                        } else {
-                            val record = DltRecord.parseFrom(call.receiveStream())
-                            when (record.operation) {
-                                Config.DltRecordOperation.ADD -> {
-                                    val result = server.connector!!.putData(record)
-                                    call.respond(HttpStatusCode.Created, result)
-                                }
-                                Config.DltRecordOperation.UPDATE -> {
-                                    val result = server.connector!!.updateData(record)
-                                    call.respond(HttpStatusCode.OK, result)
-                                }
-                                // TODO: Disable should require only uuid
-                                Config.DltRecordOperation.DISABLE -> {
-                                    val result = server.connector!!.deleteData(record.id.uuid)
-                                    call.respond(HttpStatusCode.OK, result)
-                                }
-                                else -> {
-                                    call.respond(HttpStatusCode.BadRequest, "Invalid operation")
-                                }
-                            }
-                        }
-                    }
-                    // TODO: catch exceptions one by one
-                    catch (e: Exception) {
-                        call.respond(HttpStatusCode.BadRequest, checkException(e))
-                        e.printStackTrace()
-                    }
-                    server.mutex.unlock()
-                }
-            }
-            get("/dlt/record") {
-                withContext(Dispatchers.IO) {
-                    server.mutex.lock()
-                    try {
-                        if (server.connector == null) {
-                            call.respond(HttpStatusCode.NotFound)
-                        } else {
-                            val uuid = call.receiveText()
-                            println("Uuid request: $uuid")
-                            val result = server.connector!!.getData(uuid)
-                            call.respond(HttpStatusCode.OK, result.toByteArray())
-                        }
-                    }
-                    // TODO: catch exceptions one by one
-                    catch (e: Exception) {
-                        call.respond(HttpStatusCode.NotFound, checkException(e))
-                        e.printStackTrace()
-                    }
-                    server.mutex.unlock()
-                }
-            }
-        }
-    }.start(wait = true)
-}
diff --git a/src/dlt/gateway/src/main/kotlin/proto/Config.proto b/src/dlt/gateway/src/main/kotlin/proto/Config.proto
index f492e63ce65924a98b38ea4925d43336f84d211c..b6d4c5614dca4b7784b96ebdb1d002f85c4fd0e2 100644
--- a/src/dlt/gateway/src/main/kotlin/proto/Config.proto
+++ b/src/dlt/gateway/src/main/kotlin/proto/Config.proto
@@ -12,7 +12,7 @@
 // duplication of the object or source code - either totally or in
 // part - is strictly prohibited.
 //
-//          Copyright (c) 2021 NEC Laboratories Europe GmbH
+//          Copyright (c) 2022 NEC Laboratories Europe GmbH
 //          All Rights Reserved.
 //
 // Authors: Konstantin Munichev <konstantin.munichev@neclab.eu>
@@ -52,29 +52,3 @@ message DltConfig {
   string msp = 10;
   string affiliation = 11;
 }
-
-message DltRecordId {
-  string uuid = 1;
-}
-
-enum DltRecordOperation {
-  OP_UNSET = 0;
-  ADD = 1;
-  UPDATE = 2;
-  DISABLE = 3;
-}
-
-enum DltRecordType {
-  RECORD_UNSET = 0;
-  UNKNOWN = 1;
-  SERVICE = 2;
-  DEVICE = 3;
-  SLICE = 4;
-}
-
-message DltRecord {
-  DltRecordId id = 1;
-  DltRecordOperation operation = 2;
-  DltRecordType type = 3;
-  string json = 4;
-}
\ No newline at end of file
diff --git a/src/dlt/mock_blockchain/Dockerfile b/src/dlt/mock_blockchain/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..22199b5f8f442e6d4617a2aed2e1dec9ad13e31a
--- /dev/null
+++ b/src/dlt/mock_blockchain/Dockerfile
@@ -0,0 +1,68 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/mock_blockchain
+WORKDIR /var/teraflow/mock_blockchain
+COPY src/dlt/mock_blockchain/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/dlt/mock_blockchain/. mock_blockchain
+
+# Start the service
+ENTRYPOINT ["python", "-m", "mock_blockchain.service"]
diff --git a/src/dlt/mock_blockchain/__init__.py b/src/dlt/mock_blockchain/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/dlt/mock_blockchain/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/dlt/mock_blockchain/requirements.in b/src/dlt/mock_blockchain/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dlt/mock_blockchain/service/__init__.py b/src/dlt/mock_blockchain/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/dlt/mock_blockchain/service/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/dlt/mock_blockchain/service/__main__.py b/src/dlt/mock_blockchain/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..359c6990addfcd9278496338c50320c152c1810f
--- /dev/null
+++ b/src/dlt/mock_blockchain/service/__main__.py
@@ -0,0 +1,61 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from common.Constants import ServiceNameEnum
+from common.Settings import get_log_level, get_service_port_grpc
+from common.proto.dlt_gateway_pb2_grpc import add_DltGatewayServiceServicer_to_server
+from common.tests.MockServicerImpl_DltGateway import MockServicerImpl_DltGateway
+from common.tools.service.GenericGrpcService import GenericGrpcService
+
+terminate = threading.Event()
+
+logging.basicConfig(level=get_log_level())
+LOGGER = logging.getLogger(__name__)
+
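+# Thin gRPC wrapper that serves the mock DLT gateway servicer on the port
+# configured for ServiceNameEnum.DLT_GATEWAY.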
+class MockDltGatewayService(GenericGrpcService):
+    def __init__(self, cls_name: str = 'MockDltGatewayService') -> None:
+        port = get_service_port_grpc(ServiceNameEnum.DLT_GATEWAY)
+        super().__init__(port, cls_name=cls_name)
+        self.dltgateway_servicer = MockServicerImpl_DltGateway()
+
+    # pylint: disable=attribute-defined-outside-init
+    def install_servicers(self):
+        add_DltGatewayServiceServicer_to_server(self.dltgateway_servicer, self.server)
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Starting Mock DLT gateway service
+    grpc_service = MockDltGatewayService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=0.1): pass
+
+    LOGGER.info('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/monitoring/client/MonitoringClient.py b/src/monitoring/client/MonitoringClient.py
index f65072f19013b820312aa56b7f0062f9c95f712c..73607a081cd57e7c62b9c4e2c5e487868e72d189 100644
--- a/src/monitoring/client/MonitoringClient.py
+++ b/src/monitoring/client/MonitoringClient.py
@@ -21,8 +21,8 @@ from common.tools.client.RetryDecorator import retry, delay_exponential
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.proto.context_pb2 import Empty
 from common.proto.monitoring_pb2 import Kpi, KpiDescriptor, KpiId, MonitorKpiRequest, \
-    KpiDescriptorList, KpiQuery, KpiList, SubsDescriptor, SubscriptionID, SubsIDList, \
-    AlarmDescriptor, AlarmID, AlarmIDList, AlarmResponse, AlarmSubscription
+    KpiDescriptorList, KpiQuery, KpiList, SubsDescriptor, SubscriptionID, SubsList, \
+    SubsResponse, AlarmDescriptor, AlarmID, AlarmList, AlarmResponse, AlarmSubscription
 from common.proto.monitoring_pb2_grpc import MonitoringServiceStub
 
 LOGGER = logging.getLogger(__name__)
@@ -100,10 +100,10 @@ class MonitoringClient:
         return response
 
     @RETRY_DECORATOR
-    def SubscribeKpi(self, request : SubsDescriptor) -> Iterator[KpiList]:
-        LOGGER.debug('SubscribeKpi: {:s}'.format(grpc_message_to_json_string(request)))
-        response = self.stub.SubscribeKpi(request)
-        LOGGER.debug('SubscribeKpi result: {:s}'.format(grpc_message_to_json_string(response)))
+    def SetKpiSubscription(self, request : SubsDescriptor) -> Iterator[SubsResponse]:
+        LOGGER.debug('SetKpiSubscription: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SetKpiSubscription(request)
+        LOGGER.debug('SetKpiSubscription result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
     @RETRY_DECORATOR
@@ -114,7 +114,7 @@ class MonitoringClient:
         return response
 
     @RETRY_DECORATOR
-    def GetSubscriptions(self, request : Empty) -> SubsIDList:
+    def GetSubscriptions(self, request : Empty) -> SubsList:
         LOGGER.debug('GetSubscriptions: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.GetSubscriptions(request)
         LOGGER.debug('GetSubscriptions result: {:s}'.format(grpc_message_to_json_string(response)))
@@ -135,7 +135,7 @@ class MonitoringClient:
         return response
 
     @RETRY_DECORATOR
-    def GetAlarms(self, request : Empty) -> AlarmIDList:
+    def GetAlarms(self, request : Empty) -> AlarmList:
         LOGGER.debug('GetAlarms: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.GetAlarms(request)
         LOGGER.debug('GetAlarms result: {:s}'.format(grpc_message_to_json_string(response)))
diff --git a/src/monitoring/requirements.in b/src/monitoring/requirements.in
index 50f283a1940ed99d16276857d2cab22220921879..c07f0c8f4079482a20a138d190004fa314fc9860 100644
--- a/src/monitoring/requirements.in
+++ b/src/monitoring/requirements.in
@@ -17,6 +17,7 @@ redis==4.1.2
 requests==2.27.1
 xmltodict==0.12.0
 questdb==1.0.1
+psycopg2-binary==2.9.3
 
 # pip's dependency resolver does not take into account installed packages.
 # p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install latest one
diff --git a/src/monitoring/service/AlarmManager.py b/src/monitoring/service/AlarmManager.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5ac8915c3728c7894dc70ab901215dd5a7feb41
--- /dev/null
+++ b/src/monitoring/service/AlarmManager.py
@@ -0,0 +1,32 @@
+from apscheduler.schedulers.background import BackgroundScheduler
+from apscheduler.executors.pool import ProcessPoolExecutor
+from apscheduler.jobstores.base import JobLookupError
+from datetime import datetime
+import time
+import logging
+
+LOGGER = logging.getLogger(__name__)
+
+class AlarmManager():
+    def __init__(self, metrics_db):
+        self.metrics_db = metrics_db
+        self.scheduler = BackgroundScheduler(executors={'processpool': ProcessPoolExecutor(max_workers=20)})
+        self.scheduler.start()
+        LOGGER.info("Alarm Manager Initialized")
+
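+    # Registers a background job that calls metrics_db.get_alarm_data every
+    # subscription_frequency_ms milliseconds; when subscription_timeout_s is set,
+    # the job window is bounded by start_date/end_date derived from the current time.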
+    def create_alarm(self, alarm_queue, alarm_id, kpi_id, kpiMinValue, kpiMaxValue, inRange, includeMinValue, includeMaxValue, subscription_frequency_ms, subscription_timeout_s=None):
+        start_date = None
+        end_date = None
+        if subscription_timeout_s:
+            start_timestamp = time.time()
+            start_date = datetime.fromtimestamp(start_timestamp)
+            end_date = datetime.fromtimestamp(start_timestamp + subscription_timeout_s)
+        self.scheduler.add_job(
+            self.metrics_db.get_alarm_data,
+            args=(alarm_queue, kpi_id, kpiMinValue, kpiMaxValue, inRange, includeMinValue, includeMaxValue, subscription_frequency_ms),
+            trigger='interval', seconds=(subscription_frequency_ms / 1000),
+            start_date=start_date, end_date=end_date, id=alarm_id)
+        LOGGER.debug(f"Alarm job {alarm_id} successfully created")
+
+    def delete_alarm(self, alarm_id):
+        try:
+            self.scheduler.remove_job(alarm_id)
+            LOGGER.debug(f"Alarm job {alarm_id} succesfully deleted")
+        except (Exception, JobLookupError) as e:
+            LOGGER.debug(f"Alarm job {alarm_id} does not exists")
diff --git a/src/monitoring/service/ManagementDBTools.py b/src/monitoring/service/ManagementDBTools.py
index 63bdb1893499ff560ff03866f31e3619070d3201..ece9bedb252ad589c27e2832ba1b2fbe4035e9a3 100644
--- a/src/monitoring/service/ManagementDBTools.py
+++ b/src/monitoring/service/ManagementDBTools.py
@@ -12,143 +12,292 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sqlite3 as sl
+import sqlite3
+import logging
 
-class ManagementDB():
+LOGGER = logging.getLogger(__name__)
+
+
+class ManagementDB:
     def __init__(self, database):
-        self.client = sl.connect(database, check_same_thread=False)
-        self.create_monitoring_table()
-        self.create_subscription_table()
-        self.create_alarm_table()
-        
+        try:
+            self.client = sqlite3.connect(database, check_same_thread=False)
+            self.create_monitoring_table()
+            self.create_subscription_table()
+            self.create_alarm_table()
+            LOGGER.info("ManagementDB initialized")
+        except:
+            LOGGER.info("ManagementDB cannot be initialized")
+            raise Exception("Critical error in the monitoring component")
+
     def create_monitoring_table(self):
-        self.client.execute("""
-            CREATE TABLE IF NOT EXISTS kpi(
-                kpi_id INTEGER PRIMARY KEY AUTOINCREMENT,
-                kpi_description TEXT,
-                kpi_sample_type INTEGER,
-                device_id INTEGER,
-                endpoint_id INTEGER,
-                service_id INTEGER
-            );
-        """)
-    
+        try:
+            result = self.client.execute(
+                """
+                CREATE TABLE IF NOT EXISTS kpi(
+                    kpi_id INTEGER PRIMARY KEY AUTOINCREMENT,
+                    kpi_description TEXT,
+                    kpi_sample_type INTEGER,
+                    device_id INTEGER,
+                    endpoint_id INTEGER,
+                    service_id INTEGER
+                );
+            """
+            )
+            LOGGER.debug("KPI table created in the ManagementDB")
+        except sqlite3.Error as e:
+            LOGGER.debug(f"KPI table cannot be created in the ManagementD. {e}")
+            raise Exception
+
     def create_subscription_table(self):
-        self.client.execute("""
-            CREATE TABLE IF NOT EXISTS subscription(
-                subs_id INTEGER PRIMARY KEY AUTOINCREMENT,
-                kpi_id INTEGER,
-                subscriber TEXT,
-                sampling_duration_s REAL,
-                sampling_interval_s REAL,
-                start_timestamp REAL,
-                end_timestamp REAL
-            );
-        """)
+        try:
+            result = self.client.execute(
+                """
+                CREATE TABLE IF NOT EXISTS subscription(
+                    subs_id INTEGER PRIMARY KEY AUTOINCREMENT,
+                    kpi_id INTEGER,
+                    subscriber TEXT,
+                    sampling_duration_s REAL,
+                    sampling_interval_s REAL,
+                    start_timestamp REAL,
+                    end_timestamp REAL
+                );
+            """
+            )
+            LOGGER.info("Subscription table created in the ManagementDB")
+        except sqlite3.Error as e:
+            LOGGER.debug(f"Subscription table cannot be created in the ManagementDB. {e}")
+            raise Exception
 
     def create_alarm_table(self):
-        self.client.execute("""
-            CREATE TABLE IF NOT EXISTS alarm(
-                alarm_id INTEGER PRIMARY KEY AUTOINCREMENT,
-                alarm_description TEXT,
-                alarm_name TEXT,
-                kpi_id INTEGER,
-                kpi_min_value REAL,
-                kpi_max_value REAL,
-                in_range INTEGER,
-                include_min_value INTEGER,
-                include_max_value INTEGER
-            );
-        """)
-
-    def insert_KPI(self,kpi_description,kpi_sample_type,device_id,endpoint_id,service_id):
-        c = self.client.cursor()
-        c.execute("SELECT kpi_id FROM kpi WHERE device_id is ? AND kpi_sample_type is ? AND endpoint_id is ? AND service_id is ?",(device_id,kpi_sample_type,endpoint_id,service_id))
-        data=c.fetchone()
-        if data is None:
-            c.execute("INSERT INTO kpi (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id) VALUES (?,?,?,?,?)", (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id))
-            self.client.commit()
-            return c.lastrowid
-        else:
-            return data[0]
-            
-    def insert_subscription(self,kpi_id,subscriber,sampling_duration_s,sampling_interval_s,start_timestamp, end_timestamp):
-        c = self.client.cursor()
-        c.execute("SELECT subs_id FROM subscription WHERE kpi_id is ? AND subscriber is ? AND sampling_duration_s is ? AND sampling_interval_s is ? AND start_timestamp is ? AND end_timestamp is ?",(kpi_id,subscriber,sampling_duration_s,sampling_interval_s,start_timestamp, end_timestamp))
-        data=c.fetchone()
-        if data is None:
-            c.execute("INSERT INTO subscription (kpi_id,subscriber,sampling_duration_s,sampling_interval_s,start_timestamp, end_timestamp) VALUES (?,?,?,?,?,?)", (kpi_id,subscriber,sampling_duration_s,sampling_interval_s,start_timestamp, end_timestamp))
-            self.client.commit()
-            return c.lastrowid
-        else:
-            print("already exists")
-            return data[0]
-
-    def insert_alarm(self,alarm_description,alarm_name,kpi_id,kpi_min_value,kpi_max_value,in_range,include_min_value,include_max_value):
-        c = self.client.cursor()
-        c.execute("SELECT alarm_id FROM alarm WHERE alarm_description is ? AND alarm_name is ? AND kpi_id is ? AND kpi_min_value is ? AND kpi_max_value is ? AND in_range is ? AND include_min_value is ? AND include_max_value is ?",(alarm_description,alarm_name,kpi_id,kpi_min_value,kpi_max_value,in_range,include_min_value,include_max_value))
-        data=c.fetchone()
-        if data is None:
-            c.execute("INSERT INTO alarm (alarm_description, alarm_name, kpi_id, kpi_min_value, kpi_max_value, in_range, include_min_value, include_max_value) VALUES (?,?,?,?,?,?,?,?)", (alarm_description,alarm_name,kpi_id,kpi_min_value,kpi_max_value,in_range,include_min_value,include_max_value))
-            self.client.commit()
-            return c.lastrowid
-        else:
-            print("already exists")
-            return data[0]
-
-    def delete_KPI(self,kpi_id):
-        c = self.client.cursor()
-        c.execute("SELECT * FROM kpi WHERE kpi_id is ?",(kpi_id,))       
-        data=c.fetchone()
-        if data is None:
-            return False
-        else:
-            c.execute("DELETE FROM kpi WHERE kpi_id is ?",(kpi_id,))
-            self.client.commit()
-            return True
-
-    def delete_subscription(self,subs_id):
-        c = self.client.cursor()
-        c.execute("SELECT * FROM subscription WHERE subs_id is ?",(subs_id,))       
-        data=c.fetchone()
-        if data is None:
-            return False
-        else:
-            c.execute("DELETE FROM subscription WHERE subs_id is ?",(subs_id,))
-            self.client.commit()
-            return True
-
-    def delete_alarm(self,alarm_id):
-        c = self.client.cursor()
-        c.execute("SELECT * FROM alarm WHERE alarm_id is ?",(alarm_id,))       
-        data=c.fetchone()
-        if data is None:
-            return False
-        else:
-            c.execute("DELETE FROM alarm WHERE alarm_id is ?",(alarm_id,))
-            self.client.commit()
-            return True
-
-    def get_KPI(self,kpi_id):
-        data = self.client.execute("SELECT * FROM kpi WHERE kpi_id is ?",(kpi_id,))
-        return data.fetchone()
-
-    def get_subscription(self,subs_id):
-        data = self.client.execute("SELECT * FROM subscription WHERE subs_id is ?",(subs_id,))
-        return data.fetchone()
-
-    def get_alarm(self,alarm_id):
-        data = self.client.execute("SELECT * FROM alarm WHERE alarm_id is ?",(alarm_id,))
-        return data.fetchone()
-        
+        try:
+            result = self.client.execute(
+                """
+                CREATE TABLE IF NOT EXISTS alarm(
+                    alarm_id INTEGER PRIMARY KEY AUTOINCREMENT,
+                    alarm_description TEXT,
+                    alarm_name TEXT,
+                    kpi_id INTEGER,
+                    kpi_min_value REAL,
+                    kpi_max_value REAL,
+                    in_range INTEGER,
+                    include_min_value INTEGER,
+                    include_max_value INTEGER
+                );
+            """
+            )
+            LOGGER.info("Alarm table created in the ManagementDB")
+        except sqlite3.Error as e:
+            LOGGER.debug(f"Alarm table cannot be created in the ManagementDB. {e}")
+            raise Exception
+
+    def insert_KPI(self, kpi_description, kpi_sample_type, device_id, endpoint_id, service_id):
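+        # Returns the existing kpi_id when an identical KPI is already registered;
+        # otherwise inserts a new row and returns its autoincremented id.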
+        try:
+            c = self.client.cursor()
+            c.execute(
+                "SELECT kpi_id FROM kpi WHERE device_id is ? AND kpi_sample_type is ? AND endpoint_id is ? AND service_id is ?",
+                (device_id, kpi_sample_type, endpoint_id, service_id),
+            )
+            data = c.fetchone()
+            if data is None:
+                c.execute(
+                    "INSERT INTO kpi (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id) VALUES (?,?,?,?,?)",
+                    (kpi_description, kpi_sample_type, device_id, endpoint_id, service_id),
+                )
+                self.client.commit()
+                kpi_id = c.lastrowid
+                LOGGER.debug(f"KPI {kpi_id} succesfully inserted in the ManagementDB")
+                return kpi_id
+            else:
+                kpi_id = data[0]
+                LOGGER.debug(f"KPI {kpi_id} already exists")
+                return kpi_id
+        except sqlite3.Error as e:
+            LOGGER.debug("KPI cannot be inserted in the ManagementDB: {e}")
+
+    def insert_subscription(
+        self, kpi_id, subscriber, sampling_duration_s, sampling_interval_s, start_timestamp, end_timestamp
+    ):
+        try:
+            c = self.client.cursor()
+            c.execute(
+                "SELECT subs_id FROM subscription WHERE kpi_id is ? AND subscriber is ? AND sampling_duration_s is ? AND sampling_interval_s is ? AND start_timestamp is ? AND end_timestamp is ?",
+                (kpi_id, subscriber, sampling_duration_s, sampling_interval_s, start_timestamp, end_timestamp),
+            )
+            data = c.fetchone()
+            if data is None:
+                c.execute(
+                    "INSERT INTO subscription (kpi_id,subscriber,sampling_duration_s,sampling_interval_s,start_timestamp, end_timestamp) VALUES (?,?,?,?,?,?)",
+                    (kpi_id, subscriber, sampling_duration_s, sampling_interval_s, start_timestamp, end_timestamp),
+                )
+                self.client.commit()
+                subs_id = c.lastrowid
+                LOGGER.debug(f"Subscription {subs_id} succesfully inserted in the ManagementDB")
+                return subs_id
+            else:
+                subs_id = data[0]
+                LOGGER.debug(f"Subscription {subs_id} already exists")
+                return subs_id
+        except sqlite3.Error as e:
+            LOGGER.debug("Subscription cannot be inserted in the ManagementDB: {e}")
+
+    def insert_alarm(
+        self,
+        alarm_description,
+        alarm_name,
+        kpi_id,
+        kpi_min_value,
+        kpi_max_value,
+        in_range,
+        include_min_value,
+        include_max_value,
+    ):
+        try:
+            c = self.client.cursor()
+            c.execute(
+                "SELECT alarm_id FROM alarm WHERE alarm_description is ? AND alarm_name is ? AND kpi_id is ? AND kpi_min_value is ? AND kpi_max_value is ? AND in_range is ? AND include_min_value is ? AND include_max_value is ?",
+                (
+                    alarm_description,
+                    alarm_name,
+                    kpi_id,
+                    kpi_min_value,
+                    kpi_max_value,
+                    in_range,
+                    include_min_value,
+                    include_max_value,
+                ),
+            )
+            data = c.fetchone()
+            if data is None:
+                c.execute(
+                    "INSERT INTO alarm (alarm_description, alarm_name, kpi_id, kpi_min_value, kpi_max_value, in_range, include_min_value, include_max_value) VALUES (?,?,?,?,?,?,?,?)",
+                    (
+                        alarm_description,
+                        alarm_name,
+                        kpi_id,
+                        kpi_min_value,
+                        kpi_max_value,
+                        in_range,
+                        include_min_value,
+                        include_max_value,
+                    ),
+                )
+                self.client.commit()
+                alarm_id = c.lastrowid
+                LOGGER.debug(f"Alarm {alarm_id} succesfully inserted in the ManagementDB")
+                return alarm_id
+            else:
+                alarm_id = data[0]
+                LOGGER.debug(f"Alarm {alarm_id} already exists")
+                return alarm_id
+        except sqlite3.Error as e:
+            LOGGER.debug(f"Alarm cannot be inserted in the ManagementDB: {e}")
+
+    def delete_KPI(self, kpi_id):
+        try:
+            c = self.client.cursor()
+            c.execute("SELECT * FROM kpi WHERE kpi_id is ?", (kpi_id,))
+            data = c.fetchone()
+            if data is None:
+                LOGGER.debug(f"KPI {kpi_id} does not exists")
+                return False
+            else:
+                c.execute("DELETE FROM kpi WHERE kpi_id is ?", (kpi_id,))
+                self.client.commit()
+                LOGGER.debug(f"KPI {kpi_id} deleted from the ManagementDB")
+                return True
+        except sqlite3.Error as e:
+            LOGGER.debug(f"KPI cannot be deleted from the ManagementDB: {e}")
+
+    def delete_subscription(self, subs_id):
+        try:
+            c = self.client.cursor()
+            c.execute("SELECT * FROM subscription WHERE subs_id is ?", (subs_id,))
+            data = c.fetchone()
+            if data is None:
+                LOGGER.debug(f"Subscription {subs_id} does not exists")
+                return False
+            else:
+                c.execute("DELETE FROM subscription WHERE subs_id is ?", (subs_id,))
+                self.client.commit()
+                LOGGER.debug(f"Subscription {subs_id} deleted from the ManagementDB")
+                return True
+        except sqlite3.Error as e:
+            LOGGER.debug(f"Subscription cannot be deleted from the ManagementDB: {e}")
+
+    def delete_alarm(self, alarm_id):
+        try:
+            c = self.client.cursor()
+            c.execute("SELECT * FROM alarm WHERE alarm_id is ?", (alarm_id,))
+            data = c.fetchone()
+            if data is None:
+                LOGGER.debug(f"Alarm {alarm_id} does not exists")
+                return False
+            else:
+                c.execute("DELETE FROM alarm WHERE alarm_id is ?", (alarm_id,))
+                self.client.commit()
+                LOGGER.debug(f"Alarm {alarm_id} deleted from the ManagementDB")
+                return True
+        except sqlite3.Error as e:
+            LOGGER.debug(f"Alarm cannot be deleted from the ManagementDB: {e}")
+
+    def get_KPI(self, kpi_id):
+        try:
+            data = self.client.execute("SELECT * FROM kpi WHERE kpi_id is ?", (kpi_id,)).fetchone()
+            if data:
+                LOGGER.debug(f"KPI {kpi_id} succesfully retrieved from the ManagementDB")
+                return data
+            else:
+                LOGGER.debug(f"KPI {kpi_id} does not exists")
+                return data
+        except sqlite3.Error as e:
+            LOGGER.debug(f"KPI {kpi_id} cannot be retrieved from the ManagementDB: {e}")
+
+    def get_subscription(self, subs_id):
+        try:
+            data = self.client.execute("SELECT * FROM subscription WHERE subs_id is ?", (subs_id,)).fetchone()
+            if data:
+                LOGGER.debug(f"Subscription {subs_id} succesfully retrieved from the ManagementDB")
+                return data
+            else:
+                LOGGER.debug(f"Subscription {subs_id} does not exists")
+                return data
+        except sqlite3.Error as e:
+            LOGGER.debug(f"Subscription {subs_id} cannot be retrieved from the ManagementDB: {e}")
+
+    def get_alarm(self, alarm_id):
+        try:
+            data = self.client.execute("SELECT * FROM alarm WHERE alarm_id is ?", (alarm_id,)).fetchone()
+            if data:
+                LOGGER.debug(f"Alarm {alarm_id} succesfully retrieved from the ManagementDB")
+                return data
+            else:
+                print(data)
+                LOGGER.debug(f"Alarm {alarm_id} does not exists")
+                return data
+        except sqlite3.Error as e:
+            LOGGER.debug(f"Alarm {alarm_id} cannot be retrieved from the ManagementDB: {e}")
+
     def get_KPIS(self):
-        data = self.client.execute("SELECT * FROM kpi")
-        return data.fetchall()
+        try:
+            data = self.client.execute("SELECT * FROM kpi").fetchall()
+            LOGGER.debug(f"KPIs succesfully retrieved from the ManagementDB")
+            return data
+        except sqlite3.Error as e:
+            LOGGER.debug(f"KPIs cannot be retrieved from the ManagementDB: {e}")
 
     def get_subscriptions(self):
-        data = self.client.execute("SELECT * FROM subscription")
-        return data.fetchall()
+        try:
+            data = self.client.execute("SELECT * FROM subscription").fetchall()
+            LOGGER.debug(f"Subscriptions succesfully retrieved from the ManagementDB")
+            return data
+        except sqlite3.Error as e:
+            LOGGER.debug(f"Subscriptions cannot be retrieved from the ManagementDB: {e}")
 
     def get_alarms(self):
-        data = self.client.execute("SELECT * FROM alarm")
-        return data.fetchall()
+        try:
+            data = self.client.execute("SELECT * FROM alarm").fetchall()
+            LOGGER.debug(f"Alarms succesfully retrieved from the ManagementDB")
+            return data
+        except sqlite3.Error as e:
+            LOGGER.debug(f"Alarms cannot be retrieved from the ManagementDB: {e}")
diff --git a/src/monitoring/service/MetricsDBTools.py b/src/monitoring/service/MetricsDBTools.py
index dc194c430c9700a2d89e0757c75c64025082ac29..16e6373f542656b4c172c8d619bf3f17ca5df404 100644
--- a/src/monitoring/service/MetricsDBTools.py
+++ b/src/monitoring/service/MetricsDBTools.py
@@ -12,64 +12,261 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import time
+from random import random
+
 from questdb.ingress import Sender, IngressError
 import requests
 import json
 import logging
 import datetime
+from common.tools.timestamp.Converters import timestamp_float_to_string, timestamp_utcnow_to_float
+import psycopg2
 
 LOGGER = logging.getLogger(__name__)
 
+
 class MetricsDB():
-  def __init__(self, host, ilp_port, rest_port, table):
-    self.host=host
-    self.ilp_port=int(ilp_port)
-    self.rest_port=rest_port
-    self.table=table
-    self.create_table()
-
-  def write_KPI(self,time,kpi_id,kpi_sample_type,device_id,endpoint_id,service_id,kpi_value):
-    counter=0
-    number_of_retries=10
-    while (counter<number_of_retries):
-      try:
-        with Sender(self.host, self.ilp_port) as sender:
-          sender.row(
-          self.table,
-          symbols={
-              'kpi_id': kpi_id,
-              'kpi_sample_type': kpi_sample_type,
-              'device_id': device_id,
-              'endpoint_id': endpoint_id,
-              'service_id': service_id},
-          columns={
-              'kpi_value': kpi_value},
-          at=datetime.datetime.fromtimestamp(time))
-          sender.flush()
-        counter=number_of_retries
-        LOGGER.info(f"KPI written")
-      except IngressError as ierr:
-        # LOGGER.info(ierr)
-        # LOGGER.info(f"Ingress Retry number {counter}")
-        counter=counter+1
-
-
-  def run_query(self, sql_query):
-    query_params = {'query': sql_query, 'fmt' : 'json'}
-    url = f"http://{self.host}:{self.rest_port}/exec"
-    response = requests.get(url, params=query_params)
-    json_response = json.loads(response.text)
-    LOGGER.info(f"Query executed, result:{json_response}")
-  
-  def create_table(self):
-    query = f'CREATE TABLE IF NOT EXISTS {self.table}'\
-    '(kpi_id SYMBOL,'\
-    'kpi_sample_type SYMBOL,'\
-    'device_id SYMBOL,'\
-    'endpoint_id SYMBOL,'\
-    'service_id SYMBOL,'\
-    'timestamp TIMESTAMP,'\
-    'kpi_value DOUBLE)'\
-    'TIMESTAMP(timestamp);'
-    self.run_query(query)
-    LOGGER.info(f"Table {self.table} created")
+    def __init__(self, host, ilp_port=9009, rest_port=9000, table="monitoring", commit_lag_ms=1000, retries=10,
+                 postgre=False, postgre_port=8812, postgre_user='admin', postgre_password='quest'):
+        try:
+            self.host = host
+            self.ilp_port = int(ilp_port)
+            self.rest_port = rest_port
+            self.table = table
+            self.commit_lag_ms = commit_lag_ms
+            self.retries = retries
+            self.postgre = postgre
+            self.postgre_port = postgre_port
+            self.postgre_user = postgre_user
+            self.postgre_password = postgre_password
+            self.create_table()
+            LOGGER.info("MetricsDB initialized")
+        except:
+            LOGGER.info("MetricsDB cannot be initialized")
+            raise Exception("Critical error in the monitoring component")
+
+    def is_postgre_enabled(self):
+        LOGGER.info(f"PostgreSQL is {self.postgre}")
+        return self.postgre
+
+    def get_retry_number(self):
+        LOGGER.info(f"Retry number is {self.retries}")
+        return self.retries
+
+    def get_commit_lag(self):
+        LOGGER.info(f"Commit lag of monitoring queries is {self.commit_lag_ms} ms")
+        return self.commit_lag_ms
+
+    def enable_postgre_mode(self):
+        self.postgre = True
+        LOGGER.info("MetricsDB PostgreSQL query mode enabled")
+
+    def disable_postgre_mode(self):
+        self.postgre = False
+        LOGGER.info("MetricsDB REST query mode enabled")
+
+    def set_postgre_credentials(self, user, password):
+        self.postgre_user = user
+        self.postgre_password = password
+        LOGGER.info("MetricsDB PostgreSQL credentials changed")
+
+    def set_retry_number(self, retries):
+        self.retries = retries
+        LOGGER.info(f"Retriy number changed to {retries}")
+
+    def set_commit_lag(self, commit_lag_ms):
+        self.commit_lag_ms = commit_lag_ms
+        LOGGER.info(f"Commit lag of monitoring queries changed to {commit_lag_ms} ms")
+
+    def create_table(self):
+        try:
+            query = f'CREATE TABLE IF NOT EXISTS {self.table}' \
+                    '(kpi_id SYMBOL,' \
+                    'kpi_sample_type SYMBOL,' \
+                    'device_id SYMBOL,' \
+                    'endpoint_id SYMBOL,' \
+                    'service_id SYMBOL,' \
+                    'timestamp TIMESTAMP,' \
+                    'kpi_value DOUBLE)' \
+                    'TIMESTAMP(timestamp);'
+            result = self.run_query(query)
+            if (result == True):
+                LOGGER.info(f"Table {self.table} created")
+        except (Exception) as e:
+            LOGGER.debug(f"Table {self.table} cannot be created. {e}")
+            raise Exception
+
+    def write_KPI(self, time, kpi_id, kpi_sample_type, device_id, endpoint_id, service_id, kpi_value):
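+        # Writes one KPI sample through the QuestDB ILP Sender, retrying up to
+        # self.retries times before giving up with an exception.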
+        counter = 0
+        while (counter < self.retries):
+            try:
+                with Sender(self.host, self.ilp_port) as sender:
+                    sender.row(
+                        self.table,
+                        symbols={
+                            'kpi_id': kpi_id,
+                            'kpi_sample_type': kpi_sample_type,
+                            'device_id': device_id,
+                            'endpoint_id': endpoint_id,
+                            'service_id': service_id},
+                        columns={
+                            'kpi_value': kpi_value},
+                        at=datetime.datetime.fromtimestamp(time))
+                    sender.flush()
+                counter = self.retries
+                LOGGER.debug(f"KPI written in the MetricsDB")
+            except (Exception, IngressError) as e:
+                counter = counter + 1
+                if counter == self.retries:
+                    raise Exception(f"Maximum number of retries achieved: {self.retries}")
+
+    def run_query(self, sql_query):
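+        # Issues the SQL statement against the QuestDB REST /exec endpoint; a 'ddl'
+        # key in the JSON response indicates a successful DDL statement (True is
+        # returned), while a 'dataset' key carries the rows of a SELECT.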
+        counter = 0
+        while (counter < self.retries):
+            try:
+                query_params = {'query': sql_query, 'fmt': 'json'}
+                url = f"http://{self.host}:{self.rest_port}/exec"
+                response = requests.get(url, params=query_params)
+                json_response = json.loads(response.text)
+                if ('ddl' in json_response):
+                    LOGGER.debug(f"REST query executed succesfully, result: {json_response['ddl']}")
+                    counter = self.retries
+                    return True
+                elif ('dataset' in json_response):
+                    LOGGER.debug(f"REST query executed, result: {json_response['dataset']}")
+                    counter = self.retries
+                    return json_response['dataset']
+            except (Exception, requests.exceptions.RequestException) as e:
+                counter = counter + 1
+                if counter == self.retries:
+                    raise Exception(f"Maximum number of retries achieved: {self.retries}")
+
+    def run_query_postgre(self, postgre_sql_query):
+        connection = None
+        cursor = None
+        counter = 0
+        while (counter < self.retries):
+            try:
+                connection = psycopg2.connect(
+                    user=self.postgre_user,
+                    password=self.postgre_password,
+                    host=self.host,
+                    port=self.postgre_port,
+                    database=self.table)
+                cursor = connection.cursor()
+                cursor.execute(postgre_sql_query)
+                result = cursor.fetchall()
+                LOGGER.debug(f"PostgreSQL query executed, result: {result}")
+                counter = self.retries
+                return result
+            except (Exception, psycopg2.Error) as e:
+                counter = counter + 1
+                if counter == self.retries:
+                    raise Exception(f"Maximum number of retries achieved: {self.retries}")
+            finally:
+                if cursor:
+                    cursor.close()
+                if connection:
+                    connection.close()
+
+    def get_subscription_data(self, subs_queue, kpi_id, sampling_interval_s=1):
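+        # Queries the samples written for kpi_id during the last sampling_interval_s
+        # seconds (shifted back by commit_lag_ms) and pushes any rows to subs_queue.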
+        try:
+            end_date = timestamp_utcnow_to_float() - self.commit_lag_ms / 1000
+            start_date = end_date - sampling_interval_s
+            query = f"SELECT kpi_id, timestamp, kpi_value FROM {self.table} WHERE kpi_id = '{kpi_id}' AND (timestamp BETWEEN '{timestamp_float_to_string(start_date)}' AND '{timestamp_float_to_string(end_date)}')"
+            if self.postgre:
+                kpi_list = self.run_query_postgre(query)
+                LOGGER.debug(f"kpi_list postgre: {kpi_list}")
+            else:
+                kpi_list = self.run_query(query)
+                LOGGER.debug(f"kpi_list influx: {kpi_list}")
+            if kpi_list:
+                subs_queue.put_nowait(kpi_list)
+                LOGGER.debug(f"New data received for subscription to KPI {kpi_id}")
+            else:
+                LOGGER.debug(f"No new data for the subscription to KPI {kpi_id}")
+        except (Exception) as e:
+            LOGGER.debug(f"Subscription data cannot be retrieved. {e}")
+
+    def get_alarm_data(self, alarm_queue, kpi_id, kpiMinValue, kpiMaxValue, inRange=True, includeMinValue=True, includeMaxValue=True,
+                       subscription_frequency_ms=1000):
+        try:
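+            # Fetches the KPI samples produced in the last evaluation window and checks
+            # each value against the kpiMinValue/kpiMaxValue thresholds according to the
+            # inRange/includeMinValue/includeMaxValue flags; matching samples are put on
+            # alarm_queue.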
+            end_date = timestamp_utcnow_to_float() - self.commit_lag_ms / 1000
+            start_date = end_date - subscription_frequency_ms / 1000
+            query = f"SELECT kpi_id, timestamp, kpi_value FROM {self.table} WHERE kpi_id = '{kpi_id}' AND (timestamp BETWEEN '{timestamp_float_to_string(start_date)}' AND '{timestamp_float_to_string(end_date)}')"
+            if self.postgre:
+                kpi_list = self.run_query_postgre(query)
+            else:
+                kpi_list = self.run_query(query)
+            if kpi_list:
+                LOGGER.debug(f"New data received for alarm of KPI {kpi_id}")
+                for kpi in kpi_list:
+                    alarm = False
+                    kpi_value = kpi[2]
+                    if (kpiMinValue == kpi_value and kpiMaxValue == kpi_value and inRange):
+                        alarm = True
+                    elif (
+                            inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and includeMaxValue):
+                        if (kpi_value >= kpiMinValue and kpi_value <= kpiMaxValue):
+                            alarm = True
+                    elif (
+                            inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and not includeMaxValue):
+                        if (kpi_value >= kpiMinValue and kpi_value < kpiMaxValue):
+                            alarm = True
+                    elif (
+                            inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and includeMaxValue):
+                        if (kpi_value > kpiMinValue and kpi_value <= kpiMaxValue):
+                            alarm = True
+                    elif (
+                            inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and not includeMaxValue):
+                        if (kpi_value > kpiMinValue and kpi_value < kpiMaxValue):
+                            alarm = True
+                    elif (
+                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and includeMaxValue):
+                        if (kpi_value <= kpiMinValue or kpi_value >= kpiMaxValue):
+                            alarm = True
+                    elif (
+                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and includeMinValue and not includeMaxValue):
+                        if (kpi_value <= kpiMinValue or kpi_value > kpiMaxValue):
+                            alarm = True
+                    elif (
+                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and includeMaxValue):
+                        if (kpi_value < kpiMinValue or kpi_value >= kpiMaxValue):
+                            alarm = True
+                    elif (
+                            not inRange and kpiMinValue is not None and kpiMaxValue is not None and not includeMinValue and not includeMaxValue):
+                        if (kpi_value < kpiMinValue or kpi_value > kpiMaxValue):
+                            alarm = True
+                    elif (inRange and kpiMinValue is not None and kpiMaxValue is None and includeMinValue):
+                        if (kpi_value >= kpiMinValue):
+                            alarm = True
+                    elif (inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
+                        if (kpi_value > kpiMinValue):
+                            alarm = True
+                    elif (not inRange and kpiMinValue is not None and kpiMaxValue is None and includeMinValue):
+                        if (kpi_value <= kpiMinValue):
+                            alarm = True
+                    elif (not inRange and kpiMinValue is not None and kpiMaxValue is None and not includeMinValue):
+                        if (kpi_value < kpiMinValue):
+                            alarm = True
+                    elif (inRange and kpiMinValue is None and kpiMaxValue is not None and includeMaxValue):
+                        if (kpi_value <= kpiMaxValue):
+                            alarm = True
+                    elif (inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
+                        if (kpi_value < kpiMaxValue):
+                            alarm = True
+                    elif (not inRange and kpiMinValue is None and kpiMaxValue is not None and includeMaxValue):
+                        if (kpi_value >= kpiMaxValue):
+                            alarm = True
+                    elif (not inRange and kpiMinValue is None and kpiMaxValue is not None and not includeMaxValue):
+                        if (kpi_value > kpiMaxValue):
+                            alarm = True
+                    if alarm:
+                        alarm_queue.put_nowait(kpi)
+                        LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> kpi_value:{kpi[2]}, timestamp:{kpi[1]}")
+            else:
+                LOGGER.debug(f"No new data for the alarm of KPI {kpi_id}")
+        except (Exception) as e:
+            LOGGER.debug(f"Alarm data cannot be retrieved. {e}")
\ No newline at end of file
diff --git a/src/monitoring/service/MonitoringService.py b/src/monitoring/service/MonitoringService.py
index 1a79ef9c131f8c24e50e62423a06181b4164753b..e2cbe2862894aec7b571ae857ad4c4fffa3c94c6 100644
--- a/src/monitoring/service/MonitoringService.py
+++ b/src/monitoring/service/MonitoringService.py
@@ -16,7 +16,7 @@ from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
 from common.proto.monitoring_pb2_grpc import add_MonitoringServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
-from .MonitoringServiceServicerImpl import MonitoringServiceServicerImpl
+from monitoring.service.MonitoringServiceServicerImpl import MonitoringServiceServicerImpl
 
 class MonitoringService(GenericGrpcService):
     def __init__(self, cls_name: str = __name__) -> None:
diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py
index df3b907415aabe0ed4c276ac6ac09582636ebe6b..7cd47f187986a0c32eea2ac8405183ac4418d100 100644
--- a/src/monitoring/service/MonitoringServiceServicerImpl.py
+++ b/src/monitoring/service/MonitoringServiceServicerImpl.py
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os, grpc, logging
+import os, grpc
+from queue import Queue
 
 from typing import Iterator
 
@@ -23,16 +24,20 @@ from common.proto.context_pb2 import Empty
 from common.proto.device_pb2 import MonitoringSettings
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.proto.monitoring_pb2_grpc import MonitoringServiceServicer
-from common.proto.monitoring_pb2 import AlarmResponse, AlarmDescriptor, AlarmIDList, SubsIDList, KpiId, \
+from common.proto.monitoring_pb2 import AlarmResponse, AlarmDescriptor, AlarmList, SubsList, KpiId, \
     KpiDescriptor, KpiList, KpiQuery, SubsDescriptor, SubscriptionID, AlarmID, KpiDescriptorList, \
-    MonitorKpiRequest, Kpi, AlarmSubscription
+    MonitorKpiRequest, Kpi, AlarmSubscription, SubsResponse
 from common.rpc_method_wrapper.ServiceExceptions import ServiceException
+from common.tools.timestamp.Converters import timestamp_string_to_float
 
-from monitoring.service import SqliteTools, MetricsDBTools
+from monitoring.service import ManagementDBTools, MetricsDBTools
 from device.client.DeviceClient import DeviceClient
 
 from prometheus_client import Counter, Summary
 
+from monitoring.service.AlarmManager import AlarmManager
+from monitoring.service.SubscriptionManager import SubscriptionManager
+
 LOGGER = getJSONLogger('monitoringservice-server')
 LOGGER.setLevel('DEBUG')
 
@@ -40,14 +45,14 @@ MONITORING_GETINSTANTKPI_REQUEST_TIME = Summary(
     'monitoring_getinstantkpi_processing_seconds', 'Time spent processing monitoring instant kpi request')
 MONITORING_INCLUDEKPI_COUNTER = Counter('monitoring_includekpi_counter', 'Monitoring include kpi request counter')
 
-METRICSDB_HOSTNAME  = os.environ.get("METRICSDB_HOSTNAME")
-METRICSDB_ILP_PORT  = os.environ.get("METRICSDB_ILP_PORT")
+METRICSDB_HOSTNAME = os.environ.get("METRICSDB_HOSTNAME")
+METRICSDB_ILP_PORT = os.environ.get("METRICSDB_ILP_PORT")
 METRICSDB_REST_PORT = os.environ.get("METRICSDB_REST_PORT")
-METRICSDB_TABLE     = os.environ.get("METRICSDB_TABLE")
-
+METRICSDB_TABLE = os.environ.get("METRICSDB_TABLE")
 
-DEVICESERVICE_SERVICE_HOST = get_setting('DEVICESERVICE_SERVICE_HOST',      default=get_service_host(ServiceNameEnum.DEVICE)     )
-DEVICESERVICE_SERVICE_PORT_GRPC = get_setting('DEVICESERVICE_SERVICE_PORT_GRPC', default=get_service_port_grpc(ServiceNameEnum.DEVICE))
+DEVICESERVICE_SERVICE_HOST = get_setting('DEVICESERVICE_SERVICE_HOST', default=get_service_host(ServiceNameEnum.DEVICE))
+DEVICESERVICE_SERVICE_PORT_GRPC = get_setting('DEVICESERVICE_SERVICE_PORT_GRPC',
+                                              default=get_service_port_grpc(ServiceNameEnum.DEVICE))
 
 
 class MonitoringServiceServicerImpl(MonitoringServiceServicer):
@@ -55,34 +60,41 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         LOGGER.info('Init monitoringService')
 
         # Init sqlite monitoring db
-        self.sql_db = SqliteTools.SQLite('monitoring.db')
-        self.deviceClient = DeviceClient(host=DEVICESERVICE_SERVICE_HOST, port=DEVICESERVICE_SERVICE_PORT_GRPC)  # instantiate the client
-
-        self.metrics_db = MetricsDBTools.MetricsDB(METRICSDB_HOSTNAME,METRICSDB_ILP_PORT,METRICSDB_REST_PORT,METRICSDB_TABLE)
+        self.management_db = ManagementDBTools.ManagementDB('monitoring.db')
+        self.deviceClient = DeviceClient(host=DEVICESERVICE_SERVICE_HOST,
+                                         port=DEVICESERVICE_SERVICE_PORT_GRPC)  # instantiate the client
+
+        self.metrics_db = MetricsDBTools.MetricsDB(METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT,
+                                                   METRICSDB_TABLE)
+        self.subs_manager = SubscriptionManager(self.metrics_db)
+        self.alarm_manager = AlarmManager(self.metrics_db)
         LOGGER.info('MetricsDB initialized')
 
     # SetKpi (SetKpiRequest) returns (KpiId) {}
     def SetKpi(
-        self, request : KpiDescriptor, grpc_context : grpc.ServicerContext
+            self, request: KpiDescriptor, grpc_context: grpc.ServicerContext
     ) -> KpiId:
         # CREATEKPI_COUNTER_STARTED.inc()
         LOGGER.info('SetKpi')
         try:
             # Here the code to create a sqlite query to crete a KPI and return a KpiID
-            kpi_id = KpiId()
+            response = KpiId()
 
             kpi_description = request.kpi_description
             kpi_sample_type = request.kpi_sample_type
-            kpi_device_id   = request.device_id.device_uuid.uuid
+            kpi_device_id = request.device_id.device_uuid.uuid
             kpi_endpoint_id = request.endpoint_id.endpoint_uuid.uuid
-            kpi_service_id  = request.service_id.service_uuid.uuid
+            kpi_service_id = request.service_id.service_uuid.uuid
 
-            data = self.sql_db.insert_KPI(
-                kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
+            if request.kpi_id.kpi_id.uuid != "":
+                response.kpi_id.uuid = request.kpi_id.kpi_id.uuid
+            #     Here the code to modify an existing kpi
+            else:
+                data = self.management_db.insert_KPI(
+                    kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
+                response.kpi_id.uuid = str(data)
 
-            kpi_id.kpi_id.uuid = str(data)
-            # CREATEKPI_COUNTER_COMPLETED.inc()
-            return kpi_id
+            return response
         except ServiceException as e:
             LOGGER.exception('SetKpi exception')
             # CREATEKPI_COUNTER_FAILED.inc()
@@ -92,11 +104,17 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
             # CREATEKPI_COUNTER_FAILED.inc()
             grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
 
-    def DeleteKpi ( self, request : KpiId, grpc_context : grpc.ServicerContext) -> Empty:
+    def DeleteKpi(self, request: KpiId, grpc_context: grpc.ServicerContext) -> Empty:
 
         LOGGER.info('DeleteKpi')
         try:
-             # TBC
+            LOGGER.debug(f'DeleteKpi with KpiID: {request.kpi_id.uuid}')
+            kpi_id = int(request.kpi_id.uuid)
+            kpi = self.management_db.get_KPI(kpi_id)
+            if kpi:
+                self.management_db.delete_KPI(kpi_id)
+            else:
+                LOGGER.info('DeleteKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
             return Empty()
         except ServiceException as e:
             LOGGER.exception('DeleteKpi exception')
@@ -104,71 +122,77 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         except Exception as e:  # pragma: no cover
             LOGGER.exception('DeleteKpi exception')
 
-    def GetKpiDescriptorList ( self, request : Empty, grpc_context : grpc.ServicerContext) -> KpiDescriptorList:
-
-        LOGGER.info('GetKpiDescriptorList')
+    def GetKpiDescriptor(self, request: KpiId, grpc_context: grpc.ServicerContext) -> KpiDescriptor:
+        LOGGER.info('getting Kpi by KpiID')
         try:
-             # TBC
-            return KpiDescriptorList()
+            kpi_id = request.kpi_id.uuid
+            kpi_db = self.management_db.get_KPI(int(kpi_id))
+            kpiDescriptor = KpiDescriptor()
+            if kpi_db is None:
+                LOGGER.info('GetKpiDescriptor error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+            else:
+                kpiDescriptor.kpi_description = kpi_db[1]
+                kpiDescriptor.kpi_sample_type = kpi_db[2]
+                kpiDescriptor.device_id.device_uuid.uuid = str(kpi_db[3])
+                kpiDescriptor.endpoint_id.endpoint_uuid.uuid = str(kpi_db[4])
+                kpiDescriptor.service_id.service_uuid.uuid = str(kpi_db[5])
+            return kpiDescriptor
         except ServiceException as e:
-            LOGGER.exception('GetKpiDescriptorList exception')
+            LOGGER.exception('GetKpiDescriptor exception')
             grpc_context.abort(e.code, e.details)
-        except Exception as e:  # pragma: no cover
-            LOGGER.exception('GetKpiDescriptorList exception')
+        except Exception:  # pragma: no cover
+            LOGGER.exception('GetKpiDescriptor exception')
 
-    # rpc MonitorKpi (MonitorKpiRequest) returns (context.Empty) {}
-    def MonitorKpi ( self, request : MonitorKpiRequest, grpc_context : grpc.ServicerContext) -> Empty:
+    def GetKpiDescriptorList(self, request: Empty, grpc_context: grpc.ServicerContext) -> KpiDescriptorList:
 
-        LOGGER.info('MonitorKpi')
+        LOGGER.info('GetKpiDescriptorList')
         try:
-            # Sets the request to send to the device service
-            monitor_device_request = MonitoringSettings()
+            kpi_descriptor_list = KpiDescriptorList()
 
-            kpiDescriptor = self.GetKpiDescriptor(request.kpi_id, grpc_context)
+            data = self.management_db.get_KPIS()
+            LOGGER.debug(f"data: {data}")
 
-            monitor_device_request.kpi_descriptor.CopyFrom(kpiDescriptor)
-            monitor_device_request.kpi_id.kpi_id.uuid  = request.kpi_id.kpi_id.uuid
-            monitor_device_request.sampling_duration_s = request.monitoring_window_s
-            monitor_device_request.sampling_interval_s = request.sampling_rate_s
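+            # item follows the KPI table layout: (kpi_id, description, sample_type, device, endpoint, service)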
+            for item in data:
+                kpi_descriptor = KpiDescriptor()
+                kpi_descriptor.kpi_id.kpi_id.uuid = str(item[0])
+                kpi_descriptor.kpi_description = item[1]
+                kpi_descriptor.kpi_sample_type = item[2]
+                kpi_descriptor.device_id.device_uuid.uuid = str(item[3])
+                kpi_descriptor.endpoint_id.endpoint_uuid.uuid = str(item[4])
+                kpi_descriptor.service_id.service_uuid.uuid = str(item[5])
 
-            device_client = DeviceClient()
-            device_client.MonitorDeviceKpi(monitor_device_request)
+                kpi_descriptor_list.kpi_descriptor_list.append(kpi_descriptor)
 
+            return kpi_descriptor_list
         except ServiceException as e:
-            LOGGER.exception('MonitorKpi exception')
-            # CREATEKPI_COUNTER_FAILED.inc()
+            LOGGER.exception('GetKpiDescriptorList exception')
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
-            LOGGER.exception('MonitorKpi exception')
-            grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
-            # CREATEKPI_COUNTER_FAILED.inc()
-
-        return Empty()
+            LOGGER.exception('GetKpiDescriptorList exception')
 
-    # rpc IncludeKpi(IncludeKpiRequest)  returns(context.Empty)    {}
-    def IncludeKpi(self, request : Kpi, grpc_context : grpc.ServicerContext) -> Empty:
+    def IncludeKpi(self, request: Kpi, grpc_context: grpc.ServicerContext) -> Empty:
 
         LOGGER.info('IncludeKpi')
 
         try:
+            kpi_id = request.kpi_id.kpi_id.uuid
             kpiDescriptor = self.GetKpiDescriptor(request.kpi_id, grpc_context)
-            if kpiDescriptor is None:
-                LOGGER.warning('Ignoring sample with KPIId({:s}): not found in database'.format(str(request.kpi_id)))
-                return Empty()
-
-            kpiSampleType   = KpiSampleType.Name(kpiDescriptor.kpi_sample_type).upper().replace('KPISAMPLETYPE_', '')
-            kpiId           = request.kpi_id.kpi_id.uuid
-            deviceId        = kpiDescriptor.device_id.device_uuid.uuid
-            endpointId      = kpiDescriptor.endpoint_id.endpoint_uuid.uuid
-            serviceId       = kpiDescriptor.service_id.service_uuid.uuid
-            time_stamp      = request.timestamp.timestamp
-            kpi_value       = getattr(request.kpi_value, request.kpi_value.WhichOneof('value'))
 
-            # Build the structure to be included as point in the MetricsDB
-            self.metrics_db.write_KPI(time_stamp,kpiId,kpiSampleType,deviceId,endpointId,serviceId,kpi_value)
-
-            #self.influx_db.read_KPI_points()
+            if kpiDescriptor is None:
+                LOGGER.info('IncludeKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+            else:
+                kpiSampleType = KpiSampleType.Name(kpiDescriptor.kpi_sample_type).upper().replace('KPISAMPLETYPE_', '')
+                kpiId = kpi_id
+                deviceId = kpiDescriptor.device_id.device_uuid.uuid
+                endpointId = kpiDescriptor.endpoint_id.endpoint_uuid.uuid
+                serviceId = kpiDescriptor.service_id.service_uuid.uuid
+                time_stamp = request.timestamp.timestamp
+                kpi_value = getattr(request.kpi_value, request.kpi_value.WhichOneof('value'))
+
+                # Build the structure to be included as point in the MetricsDB
+                self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, kpi_value)
 
+            return Empty()
         except ServiceException as e:
             LOGGER.exception('IncludeKpi exception')
             # CREATEKPI_COUNTER_FAILED.inc()
@@ -176,98 +200,154 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         except Exception:  # pragma: no cover
             LOGGER.exception('IncludeKpi exception')
             # CREATEKPI_COUNTER_FAILED.inc()
-        return Empty()
 
-    # def GetStreamKpi ( self, request, grpc_context : grpc.ServicerContext):
-    #
-    #     LOGGER.info('GetStreamKpi')
-    #     yield monitoring_pb2.Kpi()
-    #
-    # @MONITORING_GETINSTANTKPI_REQUEST_TIME.time()
-    # def GetInstantKpi ( self, request, grpc_context : grpc.ServicerContext):
-    #
-    #     LOGGER.info('GetInstantKpi')
-    #     return monitoring_pb2.Kpi()
+    def MonitorKpi(self, request: MonitorKpiRequest, grpc_context: grpc.ServicerContext) -> Empty:
 
-
-    def GetKpiDescriptor(self, request : KpiId, grpc_context : grpc.ServicerContext) -> KpiDescriptor:
-        LOGGER.info('getting Kpi by KpiID')
+        LOGGER.info('MonitorKpi')
         try:
-            kpi_db = self.sql_db.get_KPI(int(request.kpi_id.uuid))
-            #LOGGER.info('sql_db.get_KPIS={:s}'.format(str(self.sql_db.get_KPIS())))
-            #LOGGER.info('kpi_db={:s}'.format(str(kpi_db)))
-            if kpi_db is None: return None
-
-            kpiDescriptor = KpiDescriptor()
-
-            kpiDescriptor.kpi_description                   = kpi_db[1]
-            kpiDescriptor.kpi_sample_type                   = kpi_db[2]
-            kpiDescriptor.device_id.device_uuid.uuid        = str(kpi_db[3])
-            kpiDescriptor.endpoint_id.endpoint_uuid.uuid    = str(kpi_db[4])
-            kpiDescriptor.service_id.service_uuid.uuid      = str(kpi_db[5])
-
-            return kpiDescriptor
+            kpi_id = int(request.kpi_id.kpi_id.uuid)
+            kpi = self.management_db.get_KPI(kpi_id)
+            response = Empty()
+
+            if kpi:
+                # Sets the request to send to the device service
+                monitor_device_request = MonitoringSettings()
+
+                kpiDescriptor = self.GetKpiDescriptor(request.kpi_id, grpc_context)
+
+                monitor_device_request.kpi_descriptor.CopyFrom(kpiDescriptor)
+                monitor_device_request.kpi_id.kpi_id.uuid = request.kpi_id.kpi_id.uuid
+                monitor_device_request.sampling_duration_s = request.monitoring_window_s
+                monitor_device_request.sampling_interval_s = request.sampling_rate_s
+
+                device_client = DeviceClient()
+                device_client.MonitorDeviceKpi(monitor_device_request)
+            else:
+                LOGGER.info('MonitorKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+            return response
         except ServiceException as e:
-            LOGGER.exception('GetKpiDescriptor exception')
+            LOGGER.exception('MonitorKpi exception')
+            # CREATEKPI_COUNTER_FAILED.inc()
             grpc_context.abort(e.code, e.details)
+        except Exception as e:  # pragma: no cover
+            LOGGER.exception('MonitorKpi exception')
+            grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
+            # CREATEKPI_COUNTER_FAILED.inc()
 
-        except Exception:  # pragma: no cover
-            LOGGER.exception('GetKpiDescriptor exception')
-
-    def QueryKpiData ( self, request : KpiQuery, grpc_context : grpc.ServicerContext) -> KpiList:
+    def QueryKpiData(self, request: KpiQuery, grpc_context: grpc.ServicerContext) -> KpiList:
 
         LOGGER.info('QueryKpiData')
         try:
-             # TBC
-            return KpiQuery()
+            # TBC
+            return KpiList()
         except ServiceException as e:
             LOGGER.exception('QueryKpiData exception')
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
             LOGGER.exception('QueryKpiData exception')
 
-    def SubscribeKpi ( self, request : SubsDescriptor, grpc_context : grpc.ServicerContext) -> KpiList:
+    def SetKpiSubscription(self, request: SubsDescriptor, grpc_context: grpc.ServicerContext) -> SubsResponse:
 
         LOGGER.info('SubscribeKpi')
         try:
-             # TBC
-            yield KpiList()
+
+            subs_queue = Queue()
+            subs_response = SubsResponse()
+
+            kpi_id = request.kpi_id.kpi_id.uuid
+            sampling_duration_s = request.sampling_duration_s
+            sampling_interval_s = request.sampling_interval_s
+            start_timestamp = request.start_timestamp.timestamp
+            end_timestamp = request.end_timestamp.timestamp
+
+            subscriber = "localhost"  # Investigate how to get info from the requester
+
+            subs_id = self.management_db.insert_subscription(kpi_id, subscriber, sampling_duration_s,
+                                                             sampling_interval_s, start_timestamp, end_timestamp)
+            self.subs_manager.create_subscription(subs_queue, subs_id, kpi_id, sampling_interval_s, sampling_duration_s,
+                                                  start_timestamp, end_timestamp)
+
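+            # the SubscriptionManager schedules a periodic job that pushes batches of samples into subs_queue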
+            # parse queue to append kpis into the list
+            while not subs_queue.empty():
+                kpi_list = subs_queue.get_nowait()
+                for item in kpi_list:
+                    kpi = Kpi()
+                    kpi.kpi_id.kpi_id.uuid = str(item[0])
+                    kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
+                    kpi.kpi_value.floatVal = item[2]  # This must be improved
+                    subs_response.kpi_list.kpi.append(kpi)
+
+            subs_response.subs_id.subs_id.uuid = str(subs_id)
+
+            yield subs_response
         except ServiceException as e:
             LOGGER.exception('SubscribeKpi exception')
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
             LOGGER.exception('SubscribeKpi exception')
 
-
-    def GetSubsDescriptor ( self, request : SubscriptionID, grpc_context : grpc.ServicerContext) -> SubsDescriptor:
+    def GetSubsDescriptor(self, request: SubscriptionID, grpc_context: grpc.ServicerContext) -> SubsDescriptor:
 
         LOGGER.info('GetSubsDescriptor')
         try:
-             # TBC
-            return SubsDescriptor()
+            subs_id = request.subs_id.uuid
+            subs_db = self.management_db.get_subscription(int(request.subs_id.uuid))
+            response = SubsDescriptor()
+            if subs_db is None:
+                LOGGER.info('GetSubsDescriptor error: SubsID({:s}): not found in database'.format(str(subs_id)))
+            else:
+                LOGGER.debug(subs_db)
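+                # subscription row: (subs_id, kpi_id, subscriber, sampling_duration_s, sampling_interval_s, start, end)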
+                response.subs_id.subs_id.uuid = str(subs_db[0])
+                response.kpi_id.kpi_id.uuid = str(subs_db[1])
+                response.sampling_duration_s = subs_db[3]
+                response.sampling_interval_s = subs_db[4]
+                response.start_timestamp.timestamp = subs_db[5]
+                response.end_timestamp.timestamp = subs_db[6]
+
+            return response
         except ServiceException as e:
             LOGGER.exception('GetSubsDescriptor exception')
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
             LOGGER.exception('GetSubsDescriptor exception')
 
-    def GetSubscriptions ( self, request : Empty, grpc_context : grpc.ServicerContext) -> SubsIDList:
+    def GetSubscriptions(self, request: Empty, grpc_context: grpc.ServicerContext) -> SubsList:
 
         LOGGER.info('GetSubscriptions')
         try:
-             # TBC
-            return SubsIDList()
+            response = SubsList()
+            data = self.management_db.get_subscriptions()
+
+            for subs_db in data:
+                subs_descriptor = SubsDescriptor()
+
+                subs_descriptor.subs_id.subs_id.uuid = str(subs_db[0])
+                subs_descriptor.kpi_id.kpi_id.uuid = str(subs_db[1])
+                subs_descriptor.sampling_duration_s = subs_db[3]
+                subs_descriptor.sampling_interval_s = subs_db[4]
+                subs_descriptor.start_timestamp.timestamp = subs_db[5]
+                subs_descriptor.end_timestamp.timestamp = subs_db[6]
+
+                response.subs_descriptor.append(subs_descriptor)
+
+            return response
         except ServiceException as e:
             LOGGER.exception('GetSubscriptions exception')
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
             LOGGER.exception('GetSubscriptions exception')
 
-    def DeleteSubscription ( self, request : SubscriptionID, grpc_context : grpc.ServicerContext) -> Empty:
+    def DeleteSubscription(self, request: SubscriptionID, grpc_context: grpc.ServicerContext) -> Empty:
 
         LOGGER.info('DeleteSubscription')
         try:
-             # TBC
+            LOGGER.debug(f'DeleteSubscription with SubsID: {request.subs_id.uuid}')
+            subs_id = int(request.subs_id.uuid)
+            subs_db = self.management_db.get_subscription(int(request.subs_id.uuid))
+            if subs_db:
+                self.management_db.delete_subscription(subs_id)
+            else:
+                LOGGER.info('DeleteSubscription error: SubsID({:s}): not found in database'.format(str(subs_id)))
             return Empty()
         except ServiceException as e:
             LOGGER.exception('DeleteSubscription exception')
@@ -275,63 +355,211 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer):
         except Exception as e:  # pragma: no cover
             LOGGER.exception('DeleteSubscription exception')
 
-    def SetKpiAlarm ( self, request : AlarmDescriptor, grpc_context : grpc.ServicerContext) -> AlarmResponse:
+    def SetKpiAlarm(self, request: AlarmDescriptor, grpc_context: grpc.ServicerContext) -> AlarmID:
 
         LOGGER.info('SetKpiAlarm')
         try:
-             # TBC
-            return AlarmResponse()
+            response = AlarmID()
+
+            alarm_description = request.alarm_description
+            alarm_name = request.name
+            kpi_id = request.kpi_id.kpi_id.uuid
+            kpi_min_value = request.kpi_value_range.kpiMinValue.floatVal
+            kpi_max_value = request.kpi_value_range.kpiMaxValue.floatVal
+            in_range = request.kpi_value_range.inRange
+            include_min_value = request.kpi_value_range.includeMinValue
+            include_max_value = request.kpi_value_range.includeMaxValue
+            timestamp = request.timestamp.timestamp
+
+            LOGGER.debug(f"request.AlarmID: {request.alarm_id.alarm_id.uuid}")
+
+            if request.alarm_id.alarm_id.uuid != "":
+                alarm_id = request.alarm_id.alarm_id.uuid
+            #     Here the code to modify an existing alarm
+            else:
+                alarm_id = self.management_db.insert_alarm(alarm_description, alarm_name, kpi_id, kpi_min_value,
+                                                           kpi_max_value,
+                                                           in_range, include_min_value, include_max_value)
+                LOGGER.debug(f"AlarmID: {alarm_id}")
+            response.alarm_id.uuid = str(alarm_id)
+
+            return response
         except ServiceException as e:
             LOGGER.exception('SetKpiAlarm exception')
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
             LOGGER.exception('SetKpiAlarm exception')
 
-
-    def GetAlarms ( self, request : Empty, grpc_context : grpc.ServicerContext) -> AlarmIDList:
+    def GetAlarms(self, request: Empty, grpc_context: grpc.ServicerContext) -> AlarmList:
 
         LOGGER.info('GetAlarms')
         try:
-             # TBC
-            return AlarmIDList()
+            response = AlarmList()
+            data = self.management_db.get_alarms()
+
+            for alarm in data:
+                alarm_descriptor = AlarmDescriptor()
+
+                alarm_descriptor.alarm_id.alarm_id.uuid = str(alarm[0])
+                alarm_descriptor.alarm_description = alarm[1]
+                alarm_descriptor.name = alarm[2]
+                alarm_descriptor.kpi_id.kpi_id.uuid = str(alarm[3])
+                alarm_descriptor.kpi_value_range.kpiMinValue.floatVal = alarm[4]
+                alarm_descriptor.kpi_value_range.kpiMaxValue.floatVal = alarm[5]
+                alarm_descriptor.kpi_value_range.inRange = bool(alarm[6])
+                alarm_descriptor.kpi_value_range.includeMinValue = bool(alarm[7])
+                alarm_descriptor.kpi_value_range.includeMaxValue = bool(alarm[8])
+
+                response.alarm_descriptor.append(alarm_descriptor)
+
+            return response
         except ServiceException as e:
             LOGGER.exception('GetAlarms exception')
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
             LOGGER.exception('GetAlarms exception')
 
-    def GetAlarmDescriptor ( self, request : AlarmID, grpc_context : grpc.ServicerContext) -> AlarmDescriptor:
+    def GetAlarmDescriptor(self, request: AlarmID, grpc_context: grpc.ServicerContext) -> AlarmDescriptor:
 
         LOGGER.info('GetAlarmDescriptor')
         try:
-             # TBC
-            return AlarmDescriptor()
+            alarm_id = request.alarm_id.uuid
+            alarm = self.management_db.get_alarm(alarm_id)
+            response = AlarmDescriptor()
+
+            if alarm:
+                LOGGER.debug(f"{alarm}")
+                response.alarm_id.alarm_id.uuid = str(alarm_id)
+                response.alarm_description = alarm[1]
+                response.name = alarm[2]
+                response.kpi_id.kpi_id.uuid = str(alarm[3])
+                response.kpi_value_range.kpiMinValue.floatVal = alarm[4]
+                response.kpi_value_range.kpiMaxValue.floatVal = alarm[5]
+                response.kpi_value_range.inRange = bool(alarm[6])
+                response.kpi_value_range.includeMinValue = bool(alarm[7])
+                response.kpi_value_range.includeMaxValue = bool(alarm[8])
+            else:
+                LOGGER.info('GetAlarmDescriptor error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
+                response.alarm_id.alarm_id.uuid = "NoID"
+            return response
         except ServiceException as e:
             LOGGER.exception('GetAlarmDescriptor exception')
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
             LOGGER.exception('GetAlarmDescriptor exception')
 
-    def GetAlarmResponseStream(self, request : AlarmSubscription, grpc_context : grpc.ServicerContext) -> Iterator[AlarmResponse]:
+    def GetAlarmResponseStream(self, request: AlarmSubscription,
+                               grpc_context: grpc.ServicerContext) -> Iterator[AlarmResponse]:
 
         LOGGER.info('GetAlarmResponseStream')
         try:
-            # TBC
-            yield AlarmResponse()
+            alarm_id = request.alarm_id.alarm_id.uuid
+            alarm = self.management_db.get_alarm(alarm_id)
+            alarm_response = AlarmResponse()
+
+            if alarm:
+
+                alarm_queue = Queue()
+
+                alarm_data = alarm  # alarm row was already fetched above
+
+                alarm_id = request.alarm_id.alarm_id.uuid
+                kpi_id = alarm_data[3]
+                kpiMinValue = alarm_data[4]
+                kpiMaxValue = alarm_data[5]
+                inRange = alarm_data[6]
+                includeMinValue = alarm_data[7]
+                includeMaxValue = alarm_data[8]
+                subscription_frequency_ms = request.subscription_frequency_ms
+                subscription_timeout_s = request.subscription_timeout_s
+
+                self.alarm_manager.create_alarm(alarm_queue, alarm_id, kpi_id, kpiMinValue, kpiMaxValue, inRange,
+                                                includeMinValue, includeMaxValue, subscription_frequency_ms,
+                                                subscription_timeout_s)
+
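+                # the AlarmManager schedules periodic checks; samples that trip the thresholds land on alarm_queue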
+                while not alarm_queue.empty():
+                    kpi_list = alarm_queue.get_nowait()
+                    for item in kpi_list:
+                        kpi = Kpi()
+                        kpi.kpi_id.kpi_id.uuid = str(item[0])
+                        kpi.timestamp.timestamp = timestamp_string_to_float(item[1])
+                        kpi.kpi_value.floatVal = item[2]  # This must be improved
+                        alarm_response.kpi_list.kpi.append(kpi)
+
+                alarm_response.alarm_id.alarm_id.uuid = alarm_id
+
+                yield alarm_response
+            else:
+                LOGGER.info('GetAlarmResponseStream error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
+                alarm_response.alarm_id.alarm_id.uuid = "NoID"
+                yield alarm_response
         except ServiceException as e:
             LOGGER.exception('GetAlarmResponseStream exception')
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
             LOGGER.exception('GetAlarmResponseStream exception')
 
-    def DeleteAlarm ( self, request : AlarmID, grpc_context : grpc.ServicerContext) -> Empty:
+    def DeleteAlarm(self, request: AlarmID, grpc_context: grpc.ServicerContext) -> Empty:
 
         LOGGER.info('DeleteAlarm')
         try:
-             # TBC
-            return Empty()
+            LOGGER.debug(f'DeleteAlarm with AlarmID: {request.alarm_id.uuid}')
+            alarm_id = int(request.alarm_id.uuid)
+            alarm = self.management_db.get_alarm(alarm_id)
+            response = Empty()
+            if alarm:
+                self.alarm_manager.delete_alarm(alarm_id)
+                self.management_db.delete_alarm(alarm_id)
+            else:
+                LOGGER.info('DeleteAlarm error: AlarmID({:s}): not found in database'.format(str(alarm_id)))
+            return response
         except ServiceException as e:
             LOGGER.exception('DeleteAlarm exception')
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
             LOGGER.exception('DeleteAlarm exception')
+
+    def GetStreamKpi(self, request: KpiId, grpc_context: grpc.ServicerContext) -> Iterator[Kpi]:
+
+        LOGGER.info('GetStreamKpi')
+
+        kpi_id = request.kpi_id.uuid
+        kpi_db = self.management_db.get_KPI(int(kpi_id))
+        response = Kpi()
+        if kpi_db is None:
+            LOGGER.info('GetStreamKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+            response.kpi_id.kpi_id.uuid = "NoID"
+            yield response
+        else:
+            yield response
+
+    @MONITORING_GETINSTANTKPI_REQUEST_TIME.time()
+    def GetInstantKpi(self, request: KpiId, grpc_context: grpc.ServicerContext) -> Kpi:
+
+        LOGGER.info('GetInstantKpi')
+        try:
+            kpi_id = request.kpi_id.uuid
+            response = Kpi()
+            if kpi_id == "":
+                LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id)))
+                response.kpi_id.kpi_id.uuid = "NoID"
+            else:
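+                # QuestDB query: LATEST ON keeps only the most recent sample for this kpi_id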
+                query = f"SELECT kpi_id, timestamp, kpi_value FROM monitoring WHERE kpi_id = '{kpi_id}' " \
+                        f"LATEST ON timestamp PARTITION BY kpi_id"
+                data = self.metrics_db.run_query(query)[0]
+                LOGGER.debug(data)
+
+                response.kpi_id.kpi_id.uuid = str(data[0])
+                response.timestamp.timestamp = timestamp_string_to_float(data[1])
+                response.kpi_value.floatVal = data[2]  # This must be improved
+
+            return response
+        except ServiceException as e:
+            LOGGER.exception('GetInstantKpi exception')
+            # CREATEKPI_COUNTER_FAILED.inc()
+            grpc_context.abort(e.code, e.details)
+        except Exception as e:  # pragma: no cover
+            LOGGER.exception('GetInstantKpi exception')
+            # CREATEKPI_COUNTER_FAILED.inc()
+            grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
+
diff --git a/src/monitoring/service/SqliteTools.py b/src/monitoring/service/SqliteTools.py
deleted file mode 100644
index 092d07e9b961e98a91bb244bcc992c701ad3cd72..0000000000000000000000000000000000000000
--- a/src/monitoring/service/SqliteTools.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sqlite3 as sl
-
-class SQLite():
-    def __init__(self, database):
-        self.client = sl.connect(database, check_same_thread=False)
-        self.client.execute("""
-            CREATE TABLE IF NOT EXISTS KPI(
-                kpi_id INTEGER PRIMARY KEY AUTOINCREMENT,
-                kpi_description TEXT,
-                kpi_sample_type INTEGER,
-                device_id INTEGER,
-                endpoint_id INTEGER,
-                service_id INTEGER
-            );
-        """)
-
-    def insert_KPI(self,kpi_description,kpi_sample_type,device_id,endpoint_id,service_id ):
-        c = self.client.cursor()
-        c.execute("SELECT kpi_id FROM KPI WHERE device_id is ? AND kpi_sample_type is ? AND endpoint_id is ?",(device_id,kpi_sample_type,endpoint_id))
-        data=c.fetchone()
-        if data is None:
-            c.execute("INSERT INTO KPI (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id) VALUES (?,?,?,?,?)", (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id))
-            self.client.commit()
-            return c.lastrowid
-        else:
-            return data[0]
-
-    def delete_KPI(self,device_id,kpi_sample_type):
-        c = self.client.cursor()
-        c.execute("SELECT kpi_id FROM KPI WHERE device_id is ? AND kpi_sample_type is ?",(device_id,kpi_sample_type))       
-        data=c.fetchone()
-        if data is None:
-            return False
-        else:
-            c.execute("DELETE FROM KPI WHERE device_id is ? AND kpi_sample_type is ?",(device_id,kpi_sample_type))
-            self.client.commit()
-            return True
-
-    def delete_kpid_id(self,kpi_id):
-        c = self.client.cursor()
-        c.execute("SELECT * FROM KPI WHERE kpi_id is ?",(kpi_id,))       
-        data=c.fetchone()
-        if data is None:
-            return False
-        else:
-            c.execute("DELETE FROM KPI WHERE kpi_id is ?",(kpi_id,))
-            self.client.commit()
-            return True
-
-    def get_KPI(self,kpi_id):
-        data = self.client.execute("SELECT * FROM KPI WHERE kpi_id is ?",(kpi_id,))
-        return data.fetchone()
-        
-    def get_KPIS(self):
-        data = self.client.execute("SELECT * FROM KPI")
-        #print("\n")
-        #for row in data:
-        #    print(row)
-        return data.fetchall()
\ No newline at end of file
diff --git a/src/monitoring/service/SubscriptionManager.py b/src/monitoring/service/SubscriptionManager.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe27d6ee365676b05175b762a106621121e3b897
--- /dev/null
+++ b/src/monitoring/service/SubscriptionManager.py
@@ -0,0 +1,55 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import pytz
+from apscheduler.executors.pool import ProcessPoolExecutor
+from apscheduler.schedulers.background import BackgroundScheduler
+
+from datetime import datetime
+import time
+
+
+LOGGER = logging.getLogger(__name__)
+
+class SubscriptionManager():
+    def __init__(self, metrics_db):
+        self.metrics_db = metrics_db
+        self.scheduler = BackgroundScheduler(executors={'processpool': ProcessPoolExecutor(max_workers=20)})
+        self.scheduler.start()
+        
+    def create_subscription(self, subs_queue, subscription_id, kpi_id, sampling_interval_s,
+                            sampling_duration_s=None, start_timestamp=None, end_timestamp=None):
+        start_date = None
+        end_date = None
+        if sampling_duration_s:
+            if not start_timestamp:
+                start_timestamp = time.time()
+            end_timestamp = start_timestamp + sampling_duration_s
+        if start_timestamp:
+            start_date = datetime.utcfromtimestamp(start_timestamp).isoformat()
+        if end_timestamp:
+            end_date = datetime.utcfromtimestamp(end_timestamp).isoformat()
+
+        LOGGER.debug(f"kpi_id: {kpi_id}")
+        LOGGER.debug(f"sampling_interval_s: {sampling_interval_s}")
+        LOGGER.debug(f"subscription_id: {subscription_id}")
+        LOGGER.debug(f"start_date: {start_date}")
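+        # schedule get_subscription_data to run every sampling_interval_s between start_date and end_date (UTC)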
+        self.scheduler.add_job(self.metrics_db.get_subscription_data, args=(subs_queue,kpi_id, sampling_interval_s),
+                               trigger='interval', seconds=sampling_interval_s, start_date=start_date,
+                               end_date=end_date, timezone=pytz.utc, id=str(subscription_id))
+        LOGGER.debug(f"Subscription job {subscription_id} successfully created")
+
+    def delete_subscription(self, subscription_id):
+        self.scheduler.remove_job(subscription_id)
\ No newline at end of file
diff --git a/src/monitoring/tests/Messages.py b/src/monitoring/tests/Messages.py
index cf81ceed1e134240415ec1aabe8796cd4486f75f..845153856c44cec0576bd6f11b045e3310558a97 100644
--- a/src/monitoring/tests/Messages.py
+++ b/src/monitoring/tests/Messages.py
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import datetime
+from random import random
 
 from common.proto import monitoring_pb2
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
-from common.tools.timestamp.Converters import timestamp_string_to_float, timestamp_utcnow_to_float
-
+from common.tools.timestamp.Converters import timestamp_utcnow_to_float
 
 def kpi_id():
     _kpi_id             = monitoring_pb2.KpiId()
@@ -32,6 +32,24 @@ def create_kpi_request():
     _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END1'     # pylint: disable=maybe-no-member
     return _create_kpi_request
 
+def create_kpi_request_b():
+    _create_kpi_request                                = monitoring_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_description                = 'KPI Description Test'
+    _create_kpi_request.kpi_sample_type                = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    _create_kpi_request.device_id.device_uuid.uuid     = 'DEV2'     # pylint: disable=maybe-no-member
+    _create_kpi_request.service_id.service_uuid.uuid   = 'SERV2'    # pylint: disable=maybe-no-member
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END2'     # pylint: disable=maybe-no-member
+    return _create_kpi_request
+
+def create_kpi_request_c():
+    _create_kpi_request                                = monitoring_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_description                = 'KPI Description Test'
+    _create_kpi_request.kpi_sample_type                = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    _create_kpi_request.device_id.device_uuid.uuid     = 'DEV3'     # pylint: disable=maybe-no-member
+    _create_kpi_request.service_id.service_uuid.uuid   = 'SERV3'    # pylint: disable=maybe-no-member
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END3'     # pylint: disable=maybe-no-member
+    return _create_kpi_request
+
 def monitor_kpi_request(kpi_uuid, monitoring_window_s, sampling_rate_s):
     _monitor_kpi_request                     = monitoring_pb2.MonitorKpiRequest()
     _monitor_kpi_request.kpi_id.kpi_id.uuid  = kpi_uuid   # pylint: disable=maybe-no-member
@@ -43,5 +61,66 @@ def include_kpi_request(kpi_id):
     _include_kpi_request                        = monitoring_pb2.Kpi()
     _include_kpi_request.kpi_id.kpi_id.uuid     = kpi_id.kpi_id.uuid
     _include_kpi_request.timestamp.timestamp    = timestamp_utcnow_to_float()
-    _include_kpi_request.kpi_value.int32Val     = 500       # pylint: disable=maybe-no-member
+    _include_kpi_request.kpi_value.floatVal     = 500*random()       # pylint: disable=maybe-no-member
     return _include_kpi_request
+
+def kpi_descriptor_list():
+    _kpi_descriptor_list = monitoring_pb2.KpiDescriptorList()
+
+    return _kpi_descriptor_list
+
+def kpi_query():
+    _kpi_query = monitoring_pb2.KpiQuery()
+
+    return _kpi_query
+
+def subs_descriptor(kpi_id):
+    _subs_descriptor = monitoring_pb2.SubsDescriptor()
+
+    _subs_descriptor.subs_id.subs_id.uuid       = ""
+    _subs_descriptor.kpi_id.kpi_id.uuid         = kpi_id.kpi_id.uuid
+    _subs_descriptor.sampling_duration_s        = 10
+    _subs_descriptor.sampling_interval_s        = 2
+    _subs_descriptor.start_timestamp.timestamp  = timestamp_utcnow_to_float()
+    _subs_descriptor.end_timestamp.timestamp    = timestamp_utcnow_to_float() + 10
+
+    return _subs_descriptor
+
+def subs_id():
+    _subs_id = monitoring_pb2.SubscriptionID()
+
+    return _subs_id
+
+def alarm_descriptor():
+    _alarm_descriptor = monitoring_pb2.AlarmDescriptor()
+
+    _alarm_descriptor.alarm_description                     = "Alarm Description"
+    _alarm_descriptor.name                                  = "Alarm Name"
+    _alarm_descriptor.kpi_id.kpi_id.uuid                    = "1"
+    _alarm_descriptor.kpi_value_range.kpiMinValue.floatVal  = 0.0
+    _alarm_descriptor.kpi_value_range.kpiMaxValue.floatVal  = 50.0
+    _alarm_descriptor.kpi_value_range.inRange               = True
+    _alarm_descriptor.kpi_value_range.includeMinValue       = False
+    _alarm_descriptor.kpi_value_range.includeMaxValue       = True
+
+    return _alarm_descriptor
+
+def alarm_descriptor_b():
+    _alarm_descriptor = monitoring_pb2.AlarmDescriptor()
+
+    _alarm_descriptor.kpi_id.kpi_id.uuid = "2"
+
+    return _alarm_descriptor
+
+def alarm_subscription(alarm_id):
+    _alarm_subscription = monitoring_pb2.AlarmSubscription()
+
+    _alarm_subscription.alarm_id.alarm_id.uuid = str(alarm_id)
+
+    return _alarm_subscription
+
+
+def alarm_id():
+    _alarm_id = monitoring_pb2.AlarmID()
+
+    return _alarm_id
\ No newline at end of file
diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py
index b62b5f97f965beb75ddaafa122ac8f026faab686..ee6a29e8a483fe53c58a6e6d2e3aa240f2456b81 100644
--- a/src/monitoring/tests/test_unitary.py
+++ b/src/monitoring/tests/test_unitary.py
@@ -13,8 +13,15 @@
 # limitations under the License.
 
 import copy, os, pytest
+import threading
+import time
 from time import sleep
 from typing import Tuple
+
+from apscheduler.executors.pool import ProcessPoolExecutor
+from apscheduler.schedulers.background import BackgroundScheduler
+from grpc._channel import _MultiThreadedRendezvous
+
 from common.Constants import ServiceNameEnum
 from common.Settings import (
     ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
@@ -24,7 +31,9 @@ from common.orm.Factory import get_database_backend, BackendEnum as DatabaseBack
 from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
 from common.message_broker.MessageBroker import MessageBroker
 from common.proto import monitoring_pb2
-from common.proto.monitoring_pb2 import KpiId, KpiDescriptor
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.proto.monitoring_pb2 import KpiId, KpiDescriptor, KpiList, SubsDescriptor, SubsList, AlarmID, \
+    AlarmDescriptor, AlarmList, Kpi, KpiDescriptorList, SubsResponse, AlarmResponse
 
 from context.client.ContextClient import ContextClient
 from context.service.grpc_server.ContextService import ContextService
@@ -38,19 +47,17 @@ from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
 os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
 from device.service.drivers import DRIVERS  # pylint: disable=wrong-import-position
 
-# pylint: disable=wrong-import-position
 from monitoring.client.MonitoringClient import MonitoringClient
-from common.proto.kpi_sample_types_pb2 import KpiSampleType
-from monitoring.service import SqliteTools, MetricsDBTools
+from monitoring.service import ManagementDBTools, MetricsDBTools
 from monitoring.service.MonitoringService import MonitoringService
 from monitoring.service.EventTools import EventsDeviceCollector
-from monitoring.tests.Messages import create_kpi_request, include_kpi_request, monitor_kpi_request
+from monitoring.tests.Messages import create_kpi_request, include_kpi_request, monitor_kpi_request, \
+    create_kpi_request_b, create_kpi_request_c, kpi_query, subs_descriptor, alarm_descriptor, \
+    alarm_subscription
 from monitoring.tests.Objects import DEVICE_DEV1, DEVICE_DEV1_CONNECT_RULES, DEVICE_DEV1_UUID
 
 from monitoring.service.MonitoringServiceServicerImpl import LOGGER
 
-# LOGGER = getJSONLogger('monitoringservice-server')
-# LOGGER.setLevel('DEBUG')
 
 ###########################
 # Tests Setup
@@ -151,9 +158,9 @@ def monitoring_client(monitoring_service : MonitoringService): # pylint: disable
     _client.close()
 
 @pytest.fixture(scope='session')
-def sql_db():
-    _sql_db = SqliteTools.SQLite('monitoring.db')
-    return _sql_db
+def management_db():
+    _management_db = ManagementDBTools.ManagementDB('monitoring.db')
+    return _management_db
 
 @pytest.fixture(scope='session')
 def metrics_db():
@@ -161,7 +168,21 @@ def metrics_db():
         METRICSDB_HOSTNAME, METRICSDB_ILP_PORT, METRICSDB_REST_PORT, METRICSDB_TABLE)
     return _metrics_db
 
+@pytest.fixture(scope='session')
+def subs_scheduler():
+    _scheduler = BackgroundScheduler(executors={'processpool': ProcessPoolExecutor(max_workers=20)})
+    _scheduler.start()
+
+    return _scheduler
+
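+# helper that feeds ~200 random samples for a freshly created KPI so subscription and alarm tests have data to read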
+def ingestion_data(monitoring_client):
+    _kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
+    _include_kpi_request = include_kpi_request(_kpi_id)
 
+    for i in range(200):
+        _include_kpi_request = include_kpi_request(_kpi_id)
+        monitoring_client.IncludeKpi(_include_kpi_request)
+        time.sleep(0.01)
 
 ###########################
 # Tests Implementation
@@ -173,8 +194,44 @@ def test_set_kpi(monitoring_client): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_create_kpi requesting')
     response = monitoring_client.SetKpi(create_kpi_request())
     LOGGER.debug(str(response))
+    response = monitoring_client.SetKpi(create_kpi_request_b())
+    LOGGER.debug(str(response))
     assert isinstance(response, KpiId)
 
+
+# Test case that makes use of client fixture to test server's DeleteKpi method
+def test_delete_kpi(monitoring_client): # pylint: disable=redefined-outer-name
+    # make call to server
+    LOGGER.warning('test_delete_kpi requesting')
+    response = monitoring_client.SetKpi(create_kpi_request_b())
+    response = monitoring_client.DeleteKpi(response)
+    LOGGER.debug(str(response))
+    assert isinstance(response, Empty)
+
+# Test case that makes use of client fixture to test server's GetKpiDescriptor method
+def test_get_kpi_descriptor(monitoring_client): # pylint: disable=redefined-outer-name
+    LOGGER.warning('test_get_kpi_descriptor begin')
+    response = monitoring_client.SetKpi(create_kpi_request_c())
+    response = monitoring_client.GetKpiDescriptor(response)
+    LOGGER.debug(str(response))
+    assert isinstance(response, KpiDescriptor)
+
+# Test case that makes use of client fixture to test server's GetKpiDescriptor method
+def test_get_kpi_descriptor_list(monitoring_client): # pylint: disable=redefined-outer-name
+    LOGGER.warning('test_get_kpi_descriptor_list begin')
+    response = monitoring_client.GetKpiDescriptorList(Empty())
+    LOGGER.debug(str(response))
+    assert isinstance(response, KpiDescriptorList)
+
+# Test case that makes use of client fixture to test server's IncludeKpi method
+def test_include_kpi(monitoring_client): # pylint: disable=redefined-outer-name
+    # make call to server
+    LOGGER.warning('test_include_kpi requesting')
+    kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
+    response = monitoring_client.IncludeKpi(include_kpi_request(kpi_id))
+    LOGGER.debug(str(response))
+    assert isinstance(response, Empty)
+
 # Test case that makes use of client fixture to test server's MonitorKpi method
 def test_monitor_kpi(
         context_client : ContextClient,                 # pylint: disable=redefined-outer-name
@@ -210,13 +267,105 @@ def test_monitor_kpi(
     LOGGER.debug(str(response))
     assert isinstance(response, Empty)
 
-
-# Test case that makes use of client fixture to test server's IncludeKpi method
-def test_include_kpi(monitoring_client): # pylint: disable=redefined-outer-name
-    # make call to server
-    LOGGER.warning('test_include_kpi requesting')
-    kpi_id = monitoring_client.SetKpi(create_kpi_request())
-    response = monitoring_client.IncludeKpi(include_kpi_request(kpi_id))
+# Test case that makes use of client fixture to test server's QueryKpiData method
+def test_query_kpi_data(monitoring_client): # pylint: disable=redefined-outer-name
+    LOGGER.warning('test_query_kpi_data')
+    response = monitoring_client.QueryKpiData(kpi_query())
+    LOGGER.debug(str(response))
+    assert isinstance(response, KpiList)
+
+def test_ingestion_data(monitoring_client):
+    _kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
+    _include_kpi_request = include_kpi_request(_kpi_id)
+
+    for i in range(100):
+        _include_kpi_request = include_kpi_request(_kpi_id)
+        monitoring_client.IncludeKpi(_include_kpi_request)
+        time.sleep(0.01)
+
+# def test_subscription_scheduler(monitoring_client,metrics_db,subs_scheduler):
+#     subs_scheduler.add_job(ingestion_data(monitoring_client),id="1")
+
+# Test case that makes use of client fixture to test server's SetKpiSubscription method
+def test_set_kpi_subscription(monitoring_client,metrics_db): # pylint: disable=redefined-outer-name
+    LOGGER.warning('test_set_kpi_subscription')
+    kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
+    # thread = threading.Thread(target=test_ingestion_data, args=(monitoring_client,metrics_db))
+    # thread.start()
+    monitoring_client.IncludeKpi(include_kpi_request(kpi_id))
+    response = monitoring_client.SetKpiSubscription(subs_descriptor(kpi_id))
+    assert isinstance(response, _MultiThreadedRendezvous)
+    LOGGER.debug(response)
+    for item in response:
+        LOGGER.debug(item)
+        assert isinstance(item, SubsResponse)
+
+# Test case that makes use of client fixture to test server's GetSubsDescriptor method
+def test_get_subs_descriptor(monitoring_client):
+    LOGGER.warning('test_get_subs_descriptor')
+    kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
+    monitoring_client.IncludeKpi(include_kpi_request(kpi_id))
+    response = monitoring_client.SetKpiSubscription(subs_descriptor(kpi_id))
+    for item in response:
+        response = monitoring_client.GetSubsDescriptor(item.subs_id)
+        LOGGER.debug(response)
+        assert isinstance(response, SubsDescriptor)
+
+# Test case that makes use of client fixture to test server's GetSubscriptions method
+def test_get_subscriptions(monitoring_client):
+    LOGGER.warning('test_get_subscriptions')
+    response = monitoring_client.GetSubscriptions(Empty())
+    LOGGER.debug(response)
+    assert isinstance(response, SubsList)
+
+# Test case that makes use of client fixture to test server's DeleteSubscription method
+def test_delete_subscription(monitoring_client):
+    LOGGER.warning('test_delete_subscription')
+    kpi_id = monitoring_client.SetKpi(create_kpi_request_c())
+    monitoring_client.IncludeKpi(include_kpi_request(kpi_id))
+    subs = monitoring_client.SetKpiSubscription(subs_descriptor(kpi_id))
+    for item in subs:
+        response = monitoring_client.DeleteSubscription(item.subs_id)
+        assert isinstance(response, Empty)
+
+# Test case that makes use of client fixture to test server's SetKpiAlarm method
+def test_set_kpi_alarm(monitoring_client):
+    LOGGER.warning('test_set_kpi_alarm')
+    response = monitoring_client.SetKpiAlarm(alarm_descriptor())
+    LOGGER.debug(str(response))
+    assert isinstance(response, AlarmID)
+
+# Test case that makes use of client fixture to test server's GetAlarms method
+def test_get_alarms(monitoring_client):
+    LOGGER.warning('test_get_alarms')
+    response = monitoring_client.GetAlarms(Empty())
+    LOGGER.debug(response)
+    assert isinstance(response, AlarmList)
+
+# Test case that makes use of client fixture to test server's GetAlarmDescriptor method
+def test_get_alarm_descriptor(monitoring_client):
+    LOGGER.warning('test_get_alarm_descriptor')
+    alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor())
+    response = monitoring_client.GetAlarmDescriptor(alarm_id)
+    LOGGER.debug(response)
+    assert isinstance(response, AlarmDescriptor)
+
+# Test case that makes use of client fixture to test server's GetAlarmResponseStream method
+def test_get_alarm_response_stream(monitoring_client):
+    LOGGER.warning('test_get_alarm_response_stream')
+    alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor())
+    response = monitoring_client.GetAlarmResponseStream(alarm_subscription(alarm_id))
+    assert isinstance(response, _MultiThreadedRendezvous)
+    for item in response:
+        LOGGER.debug(item)
+        assert isinstance(item,AlarmResponse)
+
+# Test case that makes use of client fixture to test server's DeleteAlarm method
+def test_delete_alarm(monitoring_client):
+    LOGGER.warning('test_delete_alarm')
+    alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor())
+    response = monitoring_client.DeleteAlarm(alarm_id)
+    LOGGER.debug(type(response))
     assert isinstance(response, Empty)
 
 # Test case that makes use of client fixture to test server's GetStreamKpi method
@@ -224,26 +373,22 @@ def test_get_stream_kpi(monitoring_client): # pylint: disable=redefined-outer-na
     LOGGER.warning('test_getstream_kpi begin')
     response = monitoring_client.GetStreamKpi(monitoring_pb2.Kpi())
     LOGGER.debug(str(response))
-    #assert isinstance(response, Kpi)
+    assert isinstance(response, _MultiThreadedRendezvous)
 
 # Test case that makes use of client fixture to test server's GetInstantKpi method
 # def test_get_instant_kpi(monitoring_client): # pylint: disable=redefined-outer-name
 #     LOGGER.warning('test_getinstant_kpi begin')
-#     response = monitoring_client.GetInstantKpi(kpi_id())
-#     LOGGER.debug(str(response))
-#     # assert isinstance(response, Kpi)
-
-# Test case that makes use of client fixture to test server's GetInstantKpi method
-def test_get_kpidescritor_kpi(monitoring_client): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_getkpidescritor_kpi begin')
-    response = monitoring_client.SetKpi(create_kpi_request())
-    # LOGGER.debug(str(response))
-    response = monitoring_client.GetKpiDescriptor(response)
-    # LOGGER.debug(str(response))
-    assert isinstance(response, KpiDescriptor)
-
-def test_sqlitedb_tools_insert_kpi(sql_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_sqlitedb_tools_insert_kpi begin')
+#     kpi_id = monitoring_client.SetKpi(KpiId())
+#     monitoring_client.IncludeKpi(include_kpi_request(kpi_id))
+#     sleep(0.3)
+#     response = monitoring_client.GetInstantKpi(kpi_id)
+#     LOGGER.debug(response)
+#     assert isinstance(response, Kpi)
+    # response = monitoring_client.GetInstantKpi(KpiId())
+    # LOGGER.debug(type(response))
+    # assert response.kpi_id.kpi_id.uuid == "NoID"
+def test_managementdb_tools_insert_kpi(management_db): # pylint: disable=redefined-outer-name
+    LOGGER.warning('test_managementdb_tools_insert_kpi begin')
     _create_kpi_request = create_kpi_request()
     kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
     kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
@@ -251,11 +396,11 @@ def test_sqlitedb_tools_insert_kpi(sql_db): # pylint: disable=redefined-outer-na
     kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
     kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
 
-    response = sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
+    response = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
     assert isinstance(response, int)
 
-def test_sqlitedb_tools_get_kpi(sql_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_sqlitedb_tools_get_kpi begin')
+def test_managementdb_tools_get_kpi(management_db): # pylint: disable=redefined-outer-name
+    LOGGER.warning('test_managementdb_tools_get_kpi begin')
     _create_kpi_request = create_kpi_request()
     kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
     kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
@@ -263,52 +408,32 @@ def test_sqlitedb_tools_get_kpi(sql_db): # pylint: disable=redefined-outer-name
     kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
     kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
 
-    _kpi_id = sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
-    response = sql_db.get_KPI(_kpi_id)
+    _kpi_id = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
+    response = management_db.get_KPI(_kpi_id)
     assert isinstance(response, tuple)
 
-def test_sqlitedb_tools_get_kpis(sql_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_sqlitedb_tools_get_kpis begin')
-    response = sql_db.get_KPIS()
+def test_managementdb_tools_get_kpis(management_db): # pylint: disable=redefined-outer-name
+    LOGGER.warning('test_managementdb_tools_get_kpis begin')
+    response = management_db.get_KPIS()
     assert isinstance(response, list)
 
-def test_sqlitedb_tools_delete_kpi(sql_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_sqlitedb_tools_get_kpi begin')
-
-    response = sql_db.delete_KPI("DEV1",KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED)
-
-    if not response:
-        _create_kpi_request = create_kpi_request()
-        kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
-        kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
-        kpi_device_id   = _create_kpi_request.device_id.device_uuid.uuid     # pylint: disable=maybe-no-member
-        kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
-        kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
+def test_managementdb_tools_delete_kpi(management_db): # pylint: disable=redefined-outer-name
+    LOGGER.warning('test_managementdb_tools_delete_kpi begin')
 
-        sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
-        response = sql_db.delete_KPI("DEV1", KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED)
-
-    assert response
-
-def test_sqlitedb_tools_delete_kpid_id(sql_db): # pylint: disable=redefined-outer-name
-    LOGGER.warning('test_sqlitedb_tools_delete_kpid_id begin')
-
-    response = sql_db.delete_kpid_id(1)
+    _create_kpi_request = create_kpi_request()
+    kpi_description = _create_kpi_request.kpi_description  # pylint: disable=maybe-no-member
+    kpi_sample_type = _create_kpi_request.kpi_sample_type  # pylint: disable=maybe-no-member
+    kpi_device_id = _create_kpi_request.device_id.device_uuid.uuid  # pylint: disable=maybe-no-member
+    kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid  # pylint: disable=maybe-no-member
+    kpi_service_id = _create_kpi_request.service_id.service_uuid.uuid  # pylint: disable=maybe-no-member
 
-    if not response:
-        _create_kpi_request = create_kpi_request()
-        kpi_description = _create_kpi_request.kpi_description                # pylint: disable=maybe-no-member
-        kpi_sample_type = _create_kpi_request.kpi_sample_type                # pylint: disable=maybe-no-member
-        kpi_device_id   = _create_kpi_request.device_id.device_uuid.uuid     # pylint: disable=maybe-no-member
-        kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member
-        kpi_service_id  = _create_kpi_request.service_id.service_uuid.uuid   # pylint: disable=maybe-no-member
+    _kpi_id = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id,
+                                        kpi_service_id)
 
-        _kpi_id = sql_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id)
-        response = sql_db.delete_kpid_id(_kpi_id)
+    response = management_db.delete_KPI(_kpi_id)
 
     assert response
 
-
 def test_metrics_db_tools_write_kpi(metrics_db): # pylint: disable=redefined-outer-name
     LOGGER.warning('test_metric_sdb_tools_write_kpi begin')
 
diff --git a/src/tests/Fixtures.py b/src/tests/Fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..aeead8448651b386f4c69d12c139b6043fe5ef55
--- /dev/null
+++ b/src/tests/Fixtures.py
@@ -0,0 +1,38 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+from common.Settings import get_setting
+from compute.tests.mock_osm.MockOSM import MockOSM
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from monitoring.client.MonitoringClient import MonitoringClient
+
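+# Shared session-scoped client fixtures. Test modules import them explicitly, e.g.:
+#   from tests.Fixtures import context_client, device_client, monitoring_client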
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def monitoring_client():
+    _client = MonitoringClient()
+    yield _client
+    _client.close()
diff --git a/src/tests/ecoc22/tests/Fixtures.py b/src/tests/ecoc22/tests/Fixtures.py
index 70b41bdcb159552daa3dcf0c041a3713e2d1c821..0e5c7fbe3107ea55ba8243be18e9b100571d1c4b 100644
--- a/src/tests/ecoc22/tests/Fixtures.py
+++ b/src/tests/ecoc22/tests/Fixtures.py
@@ -1,24 +1,24 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import pytest
 from common.Settings import get_setting
 from compute.tests.mock_osm.MockOSM import MockOSM
-from context.client.ContextClient import ContextClient
-from device.client.DeviceClient import DeviceClient
 #from .Objects_BigNet import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME
 from .Objects_DC_CSGW_TN import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME
 #from .Objects_DC_CSGW_TN_OLS import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME
 
-@pytest.fixture(scope='session')
-def context_client():
-    _client = ContextClient()
-    yield _client
-    _client.close()
-
-@pytest.fixture(scope='session')
-def device_client():
-    _client = DeviceClient()
-    yield _client
-    _client.close()
-
 @pytest.fixture(scope='session')
 def osm_wim():
     wim_url = 'http://{:s}:{:s}'.format(
diff --git a/src/tests/ecoc22/tests/test_functional_bootstrap.py b/src/tests/ecoc22/tests/test_functional_bootstrap.py
index 14ee21658838b21d989646134f263f7961fc6c11..75f2bddf2c3bb21084efb6be3f5957df122da429 100644
--- a/src/tests/ecoc22/tests/test_functional_bootstrap.py
+++ b/src/tests/ecoc22/tests/test_functional_bootstrap.py
@@ -16,7 +16,7 @@ import logging
 from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Topology, TopologyId
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
-from .Fixtures import context_client, device_client
+from tests.Fixtures import context_client, device_client
 #from .Objects_BigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
 #from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
 #from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, OBJECTS_PER_TOPOLOGY
diff --git a/src/tests/ecoc22/tests/test_functional_cleanup.py b/src/tests/ecoc22/tests/test_functional_cleanup.py
index 2fc61e818ac5371ea0730ce40db4f69e56324668..017cc991dd5bb49f6f02f178fc4354653b7bea43 100644
--- a/src/tests/ecoc22/tests/test_functional_cleanup.py
+++ b/src/tests/ecoc22/tests/test_functional_cleanup.py
@@ -17,7 +17,7 @@ from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, Topolog
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
-from .Fixtures import context_client, device_client
+from tests.Fixtures import context_client, device_client
 #from .Objects_BigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
 #from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
 #from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
diff --git a/src/tests/ecoc22/tests/test_functional_create_service.py b/src/tests/ecoc22/tests/test_functional_create_service.py
index 2f576db836b6868dbc8617c1e81686b4f6ee5093..8c9ca36a96d161f10ed69c1a86794abf78555571 100644
--- a/src/tests/ecoc22/tests/test_functional_create_service.py
+++ b/src/tests/ecoc22/tests/test_functional_create_service.py
@@ -17,7 +17,8 @@ from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from compute.tests.mock_osm.MockOSM import MockOSM
 from context.client.ContextClient import ContextClient
-from .Fixtures import context_client, osm_wim
+from tests.Fixtures import context_client
+from .Fixtures import osm_wim
 #from .Objects_BigNet import (
 #    CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
 #from .Objects_DC_CSGW_TN import (
diff --git a/src/tests/ecoc22/tests/test_functional_delete_service.py b/src/tests/ecoc22/tests/test_functional_delete_service.py
index 89d7a621fb21a7c26da8bd64269ca1e02ececebb..de152ebb71111c9201dfde18262586b242b04083 100644
--- a/src/tests/ecoc22/tests/test_functional_delete_service.py
+++ b/src/tests/ecoc22/tests/test_functional_delete_service.py
@@ -23,7 +23,8 @@ from common.tools.object_factory.Service import json_service_id
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from compute.tests.mock_osm.MockOSM import MockOSM
 from context.client.ContextClient import ContextClient
-from .Fixtures import context_client, osm_wim
+from tests.Fixtures import context_client
+from .Fixtures import osm_wim
 #from .Objects_BigNet import (
 #    CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
 #from .Objects_DC_CSGW_TN import (
diff --git a/src/tests/ofc22/tests/Fixtures.py b/src/tests/ofc22/tests/Fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..370731e5de14b2c7c4acdcfa86eacfa66f2ffd4b
--- /dev/null
+++ b/src/tests/ofc22/tests/Fixtures.py
@@ -0,0 +1,25 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+from common.Settings import get_setting
+from compute.tests.mock_osm.MockOSM import MockOSM
+from .Objects import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME
+
+
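+# MockOSM emulates an OSM WIM client; it targets the compute service host/port
+# taken from the deployment environment settings.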
+@pytest.fixture(scope='session')
+def osm_wim():
+    wim_url = 'http://{:s}:{:s}'.format(
+        get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP')))
+    return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
diff --git a/src/tests/ofc22/tests/test_functional_bootstrap.py b/src/tests/ofc22/tests/test_functional_bootstrap.py
index 3ea9393c5e7f575b24a7fd0ec2f5de929900d066..76c52810bb855a28f772dcc564e97e9f3ff1f92e 100644
--- a/src/tests/ofc22/tests/test_functional_bootstrap.py
+++ b/src/tests/ofc22/tests/test_functional_bootstrap.py
@@ -14,36 +14,24 @@
 
 import copy, logging, pytest
 from common.Settings import get_setting
+from common.proto.monitoring_pb2 import KpiDescriptorList
 from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.Link import json_link_id
 from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
+from monitoring.client.MonitoringClient import MonitoringClient
 from context.client.EventsCollector import EventsCollector
 from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology
 from device.client.DeviceClient import DeviceClient
 from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
-
+from tests.Fixtures import context_client, device_client, monitoring_client
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
 
-@pytest.fixture(scope='session')
-def context_client():
-    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
-    yield _client
-    _client.close()
-
-
-@pytest.fixture(scope='session')
-def device_client():
-    _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
-    yield _client
-    _client.close()
-
-
 def test_scenario_empty(context_client : ContextClient):  # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure database is empty -------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
@@ -202,3 +190,15 @@ def test_links_created(context_client : ContextClient):  # pylint: disable=redef
 
     response = context_client.ListServices(ContextId(**CONTEXT_ID))
     assert len(response.services) == 0
+
+
+def test_scenario_kpis_created(monitoring_client: MonitoringClient):
+    """
+    This test validates that the KPIs related to the devices/endpoints were created
+    during the scenario bootstrap process.
+    """
+    response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty())
+    LOGGER.info("Number of KPIs created: {}".format(len(response.kpi_descriptor_list)))
+    # TODO: replace the magic number `16` below for a formula that adapts to the number
+    # of links and devices
+    assert len(response.kpi_descriptor_list) == 16
diff --git a/src/tests/ofc22/tests/test_functional_cleanup.py b/src/tests/ofc22/tests/test_functional_cleanup.py
index 60bb86b50853680e0699906dcb28ebd2e8777bb4..b0dfe54900f5a806607fcd669942e7fa592dcbaa 100644
--- a/src/tests/ofc22/tests/test_functional_cleanup.py
+++ b/src/tests/ofc22/tests/test_functional_cleanup.py
@@ -23,26 +23,13 @@ from context.client.ContextClient import ContextClient
 from context.client.EventsCollector import EventsCollector
 from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId
 from device.client.DeviceClient import DeviceClient
+from tests.Fixtures import context_client, device_client
 from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
 
-@pytest.fixture(scope='session')
-def context_client():
-    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
-    yield _client
-    _client.close()
-
-
-@pytest.fixture(scope='session')
-def device_client():
-    _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
-    yield _client
-    _client.close()
-
-
 def test_services_removed(context_client : ContextClient):  # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure service is removed ------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py
index 1f5b80cbf250a7b58321fcae693acf078b6b0a67..5615f119b91fba10dd767d7188b303f926750e06 100644
--- a/src/tests/ofc22/tests/test_functional_create_service.py
+++ b/src/tests/ofc22/tests/test_functional_create_service.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, pytest
+import logging, pytest, random, time
 from common.DeviceTypes import DeviceTypeEnum
 from common.Settings import get_setting
 from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
@@ -22,11 +22,14 @@ from common.tools.object_factory.Service import json_service_id
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from compute.tests.mock_osm.MockOSM import MockOSM
 from context.client.ContextClient import ContextClient
+from monitoring.client.MonitoringClient import MonitoringClient
 from context.client.EventsCollector import EventsCollector
 from common.proto.context_pb2 import ContextId, Empty
+from tests.Fixtures import context_client, monitoring_client
+from .Fixtures import osm_wim
 from .Objects import (
     CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES,
-    WIM_MAPPING, WIM_PASSWORD, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE, WIM_USERNAME)
+    WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE)
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
@@ -35,20 +38,6 @@ DEVTYPE_EMU_PR  = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
 DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value
 
 
-@pytest.fixture(scope='session')
-def context_client():
-    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
-    yield _client
-    _client.close()
-
-
-@pytest.fixture(scope='session')
-def osm_wim():
-    wim_url = 'http://{:s}:{:s}'.format(
-        get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP')))
-    return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
-
-
 def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure links are created -------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
@@ -69,8 +58,9 @@ def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable
 
 def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
     # ----- Start the EventsCollector ----------------------------------------------------------------------------------
-    #events_collector = EventsCollector(context_client, log_events_received=True)
-    #events_collector.start()
+    # TODO: re-establish the tests of the events
+    # events_collector = EventsCollector(context_client, log_events_received=True)
+    # events_collector.start()
 
     # ----- Create Service ---------------------------------------------------------------------------------------------
     service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
@@ -78,30 +68,30 @@ def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): #
 
     # ----- Validate collected events ----------------------------------------------------------------------------------
 
-    #packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
-    #optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
-    #optical_service_uuid = '{:s}:optical'.format(service_uuid)
+    # packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
+    # optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
+    # optical_service_uuid = '{:s}:optical'.format(service_uuid)
 
-    #expected_events = [
+    # expected_events = [
     #    # Create packet service and add first endpoint
     #    ('ServiceEvent',    EVENT_CREATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
     #    ('ServiceEvent',    EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
-    #
+    
     #    # Configure OLS controller, create optical service, create optical connection
     #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
     #    ('ServiceEvent',    EVENT_CREATE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
     #    ('ConnectionEvent', EVENT_CREATE, json_connection_id(optical_connection_uuid)),
-    #
+    
     #    # Configure endpoint packet devices, add second endpoint to service, create connection
     #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
     #    ('DeviceEvent',     EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
     #    ('ServiceEvent',    EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
     #    ('ConnectionEvent', EVENT_CREATE, json_connection_id(packet_connection_uuid)),
-    #]
-    #check_events(events_collector, expected_events)
+    # ]
+    # check_events(events_collector, expected_events)
 
-    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
-    #events_collector.stop()
+    # # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    # events_collector.stop()
 
 
 def test_scenario_service_created(context_client : ContextClient):  # pylint: disable=redefined-outer-name
@@ -127,3 +117,17 @@ def test_scenario_service_created(context_client : ContextClient):  # pylint: di
         LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
             grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
         assert len(response.connections) == 1 # one connection per service
+
+
+def test_scenario_kpi_values_created(monitoring_client: MonitoringClient):
+    """
+    This test validates that KPI values have been inserted into the monitoring database.
+    We randomly sample k KPI descriptors and validate their instant values.
+    """
+    response = monitoring_client.GetKpiDescriptorList(Empty())
+    kpi_descriptors = random.choices(response.kpi_descriptor_list, k=2)
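+    # Note: random.choices samples with replacement; two descriptors suffice for this smoke check.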
+
+    for kpi_descriptor in kpi_descriptors:
+        response = monitoring_client.GetInstantKpi(kpi_descriptor.kpi_id)
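+        # A valid instant value must echo the requested KPI ID and carry a non-zero timestamp.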
+        assert response.kpi_id.kpi_id.uuid == kpi_descriptor.kpi_id.kpi_id.uuid
+        assert response.timestamp.timestamp > 0
diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py
index f0cc916cf9da794ec32550a609e4f45962370cfc..5d9568cd81906ac76b600a2253a5e0bdf741bc01 100644
--- a/src/tests/ofc22/tests/test_functional_delete_service.py
+++ b/src/tests/ofc22/tests/test_functional_delete_service.py
@@ -24,6 +24,8 @@ from compute.tests.mock_osm.MockOSM import MockOSM
 from context.client.ContextClient import ContextClient
 from context.client.EventsCollector import EventsCollector
 from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
+from tests.Fixtures import context_client
+from .Fixtures import osm_wim
 from .Objects import (
     CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING,
     WIM_PASSWORD, WIM_USERNAME)
@@ -36,20 +38,6 @@ DEVTYPE_EMU_PR  = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value
 DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value
 
 
-@pytest.fixture(scope='session')
-def context_client():
-    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
-    yield _client
-    _client.close()
-
-
-@pytest.fixture(scope='session')
-def osm_wim():
-    wim_url = 'http://{:s}:{:s}'.format(
-        get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP')))
-    return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD)
-
-
 def test_scenario_is_correct(context_client : ContextClient):  # pylint: disable=redefined-outer-name
     # ----- List entities - Ensure service is created ------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html
index 936b0f08fb1b7def156e11f16bf552b8d60018be..07734f32304b60365f76413d4689a37b66cc60a3 100644
--- a/src/webui/service/templates/slice/detail.html
+++ b/src/webui/service/templates/slice/detail.html
@@ -128,7 +128,7 @@
             <td>-</td>
             <td>
                 {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths;
-                {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}active
+                {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}-active
             </td>
         </tr>
         {% else %}