diff --git a/.gitignore b/.gitignore
index 20b98c30c5b3edb0983578b0a5f74fb1c1f3025e..e1f87cfd3842c264bd219237e9afe113d61c35bc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -176,3 +176,6 @@ libyang/
 
 # Other logs
 **/logs/*.log.*
+
+# PySpark checkpoints
+src/analytics/.spark/*
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c7dfdbf696673bebba9cd9783e97acdf1b1d04a5..5a4e5b601cfd2d785fd847bda9b2bdd794d6ce37 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -49,6 +49,6 @@ include:
   - local: '/src/kpi_manager/.gitlab-ci.yml'
   - local: '/src/kpi_value_api/.gitlab-ci.yml'
   - local: '/src/kpi_value_writer/.gitlab-ci.yml'
-
+  - local: '/src/telemetry/.gitlab-ci.yml'
   # This should be last one: end-to-end integration tests
   - local: '/src/tests/.gitlab-ci.yml'
diff --git a/deploy/all.sh b/deploy/all.sh
index f93cd92ac5e3189b0dc8fa71d74a586e929aaecc..06b8ee701530f56381080879d0e2941b664e5197 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -33,7 +33,7 @@ export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice n
 #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
 
 # Uncomment to activate Monitoring Framework (new)
-#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics"
 
 # Uncomment to activate BGP-LS Speaker
 #export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
@@ -215,6 +215,9 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
 # Deploy QuestDB
 ./deploy/qdb.sh
 
+# Deploy Apache Kafka
+./deploy/kafka.sh
+
 # Expose Dashboard
 ./deploy/expose_dashboard.sh
 
diff --git a/deploy/kafka.sh b/deploy/kafka.sh
index 4a91bfc9e657d1b8a6a548b9c0a81a2f8a0b45e0..0483bce153b457800c6f7db2ef66685e90118111 100755
--- a/deploy/kafka.sh
+++ b/deploy/kafka.sh
@@ -20,50 +20,71 @@
 # If not already set, set the namespace where Apache Kafka will be deployed.
 export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"}
 
+# If not already set, set the port Apache Kafka server will be exposed to.
+export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"}
+
+# If not already set, set the flag to YES to redeploy Apache Kafka; note that all topics will be lost.
+export KFK_REDEPLOY=${KFK_REDEPLOY:-""}
+
 
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
 
-# Constants
-TMP_FOLDER="./tmp"
-KFK_MANIFESTS_PATH="manifests/kafka"
-KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml"
-KFK_MANIFEST="02-kafka.yaml"
+    # Constants
+    TMP_FOLDER="./tmp"
+    KFK_MANIFESTS_PATH="manifests/kafka"
+    KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml"
+    KFK_MANIFEST="02-kafka.yaml"
+
+    # Create a tmp folder for files modified during the deployment
+    TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests"
+    mkdir -p ${TMP_MANIFESTS_FOLDER}
 
-# Create a tmp folder for files modified during the deployment
-TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests"
-mkdir -p ${TMP_MANIFESTS_FOLDER}
+function kafka_deploy() {
+    # copy zookeeper and kafka manifest files to temporary manifest location
+    cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
+    cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
 
-# copy zookeeper and kafka manifest files to temporary manifest location
-cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
-cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
+    # echo "Apache Kafka Namespace"
+    echo ">>> Delete Apache Kafka Namespace"
+    kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
 
-echo "Apache Kafka Namespace"
-echo ">>> Delete Apache Kafka Namespace"
-kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
+    echo ">>> Create Apache Kafka Namespace"
+    kubectl create namespace ${KFK_NAMESPACE}
 
-echo ">>> Create Apache Kafka Namespace"
-kubectl create namespace ${KFK_NAMESPACE}
+    # echo ">>> Deplying Apache Kafka Zookeeper"
+    # Kafka zookeeper service should be deployed before the kafka service
+    kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
 
-echo ">>> Deplying Apache Kafka Zookeeper"
-# Kafka zookeeper service should be deployed before the kafka service
-kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
+    KFK_ZOOKEEPER_SERVICE="zookeeper-service"    # this command may be replaced with command to extract service name automatically
+    KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}')
 
-KFK_ZOOKEEPER_SERVICE="zookeeper-service"    # this command may be replaced with command to extract service name automatically
-KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}')
+    # Kafka service should be deployed after the zookeeper service
+    sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
 
-# Kafka service should be deployed after the zookeeper service
-sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
+    # echo ">>> Deploying Apache Kafka Broker"
+    kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
 
-echo ">>> Deploying Apache Kafka Broker"
-kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
+    # echo ">>> Verifing Apache Kafka deployment"
+    sleep 5
+    # KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods)
+    # if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then
+    #     echo "Deployment Error: \n $KFK_PODS_STATUS"
+    # else
+    #     echo "$KFK_PODS_STATUS"
+    # fi
+}
 
-echo ">>> Verifing Apache Kafka deployment"
-sleep 10
-KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods)
-if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then
-    echo "Deployment Error: \n $KFK_PODS_STATUS"
+echo "Apache Kafka"
+echo ">>> Checking if Apache Kafka is deployed ... "
+if [ "$KFK_REDEPLOY" == "YES" ]; then
+    echo ">>> Redeploying kafka namespace"
+    kafka_deploy
+elif kubectl get namespace "${KFK_NAMESPACE}" &> /dev/null; then
+    echo ">>> Apache Kafka already present; skipping step." 
 else
-    echo "$KFK_PODS_STATUS"
-fi
\ No newline at end of file
+    echo ">>> Kafka namespace doesn't exists. Deploying kafka namespace"
+    kafka_deploy
+fi
+echo
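After kafka.sh completes, the broker is reachable inside the cluster at kafka-service.<KFK_NAMESPACE>.svc.cluster.local:<KFK_SERVER_PORT> (see the advertised-listener change in manifests/kafka/02-kafka.yaml below). A minimal connectivity check, assuming it runs from a pod inside the cluster and uses the confluent-kafka package already listed in the backend requirements:

```python
# Hedged sketch: verify the freshly deployed broker answers on KFK_SERVER_PORT.
# Assumes in-cluster DNS resolution of the kafka-service Service.
import os
from confluent_kafka.admin import AdminClient

bootstrap = 'kafka-service.{:s}.svc.cluster.local:{:s}'.format(
    os.environ.get('KFK_NAMESPACE', 'kafka'),
    os.environ.get('KFK_SERVER_PORT', '9092'))
admin    = AdminClient({'bootstrap.servers': bootstrap})
metadata = admin.list_topics(timeout=10)   # raises KafkaException if the broker is unreachable
print('Kafka broker reachable; known topics:', list(metadata.topics.keys()))
```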
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 62f36a2c138c99b1ee666c8c5397083266ad699d..1dceae1c1b4ee3e2a36816557b54df48b224eba1 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -115,6 +115,17 @@ export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
 export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
 
 
+# ----- Apache Kafka ------------------------------------------------------
+
+# If not already set, set the namespace where Apache Kafka will be deployed.
+export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"}
+
+# If not already set, set the port Apache Kafka server will be exposed to.
+export KFK_SERVER_PORT=${KFK_SERVER_PORT:-"9092"}
+
+# If not already set, set the flag to YES to redeploy Apache Kafka; note that all topics will be lost.
+export KFK_REDEPLOY=${KFK_REDEPLOY:-""}
+
 ########################################################################################################################
 # Automated steps start here
 ########################################################################################################################
@@ -147,7 +158,7 @@ kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type=
     --from-literal=CRDB_SSLMODE=require
 printf "\n"
 
-echo "Create secret with CockroachDB data for KPI Management"
+echo "Create secret with CockroachDB data for KPI Management microservices"
 CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
 CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt"  # TODO: change by specific configurable environment variable
 kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
@@ -159,6 +170,37 @@ kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --t
     --from-literal=CRDB_SSLMODE=require
 printf "\n"
 
+echo "Create secret with CockroachDB data for Telemetry microservices"
+CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+CRDB_DATABASE_TELEMETRY="tfs_telemetry"  # TODO: change by specific configurable environment variable
+kubectl create secret generic crdb-telemetry --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
+    --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
+    --from-literal=CRDB_DATABASE=${CRDB_DATABASE_TELEMETRY} \
+    --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
+    --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
+    --from-literal=CRDB_SSLMODE=require
+printf "\n"
+
+echo "Create secret with CockroachDB data for Analytics microservices"
+CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+CRDB_DATABASE_ANALYTICS="tfs_analytics"  # TODO: change by specific configurable environment variable
+kubectl create secret generic crdb-analytics --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
+    --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
+    --from-literal=CRDB_DATABASE=${CRDB_DATABASE_ANALYTICS} \
+    --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
+    --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
+    --from-literal=CRDB_SSLMODE=require
+printf "\n"
+
+echo "Create secret with Apache Kafka data for KPI, Telemetry and Analytics microservices"
+KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-service -o 'jsonpath={.spec.ports[0].port}')
+kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \
+    --from-literal=KFK_SERVER_PORT=${KFK_SERVER_PORT}
+printf "\n"
+
 echo "Create secret with NATS data"
 NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
 if [ -z "$NATS_CLIENT_PORT" ]; then
@@ -234,15 +276,17 @@ for COMPONENT in $TFS_COMPONENTS; do
 
         if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then
             $DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
-        elif [ "$COMPONENT" == "pathcomp" ]; then
+        elif [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ]; then
             BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
             $DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
 
             BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
             $DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
-            # next command is redundant, but helpful to keep cache updated between rebuilds
-            IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
-            $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+            if [ "$COMPONENT" == "pathcomp" ]; then
+                # next command is redundant, but helpful to keep cache updated between rebuilds
+                IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
+                $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+            fi
         elif [ "$COMPONENT" == "dlt" ]; then
             BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
             $DOCKER_BUILD -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
@@ -255,7 +299,7 @@ for COMPONENT in $TFS_COMPONENTS; do
 
         echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
 
-        if [ "$COMPONENT" == "pathcomp" ]; then
+        if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ] ; then
             IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
 
             TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
@@ -306,7 +350,7 @@ for COMPONENT in $TFS_COMPONENTS; do
         cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
     fi
 
-    if [ "$COMPONENT" == "pathcomp" ]; then
+    if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ]; then
         IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
         VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4)
         sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
diff --git a/manifests/analyticsservice.yaml b/manifests/analyticsservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0fa3ed0be6eda8cf944e199543e3c2cd59cc98d6
--- /dev/null
+++ b/manifests/analyticsservice.yaml
@@ -0,0 +1,128 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: analyticsservice
+spec:
+  selector:
+    matchLabels:
+      app: analyticsservice
+  #replicas: 1
+  template:
+    metadata:
+      labels:
+        app: analyticsservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+        - name: frontend
+          image: labs.etsi.org:5050/tfs/controller/analytics-frontend:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 30080
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          envFrom:
+            - secretRef:
+                name: crdb-analytics
+            - secretRef:
+                name: kfk-kpi-data
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30080"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30080"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+        - name: backend
+          image: labs.etsi.org:5050/tfs/controller/analytics-backend:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 30090
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          envFrom:
+            - secretRef:
+                name: kfk-kpi-data
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30090"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30090"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: analyticsservice
+  labels:
+    app: analyticsservice
+spec:
+  type: ClusterIP
+  selector:
+    app: analyticsservice
+  ports:
+    - name: frontend-grpc
+      protocol: TCP
+      port: 30080
+      targetPort: 30080
+    - name: backend-grpc
+      protocol: TCP
+      port: 30090
+      targetPort: 30090
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: analyticsservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: analyticsservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/kafka/02-kafka.yaml b/manifests/kafka/02-kafka.yaml
index 8e4562e6eabec34bf3b87912310479bd98022aeb..8400f5944193458ccdad8be5dbc189f8f40cdd7b 100644
--- a/manifests/kafka/02-kafka.yaml
+++ b/manifests/kafka/02-kafka.yaml
@@ -53,9 +53,9 @@ spec:
         - name: KAFKA_LISTENERS
           value: PLAINTEXT://:9092
         - name: KAFKA_ADVERTISED_LISTENERS
-          value: PLAINTEXT://localhost:9092
+          value: PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092
         image: wurstmeister/kafka
         imagePullPolicy: IfNotPresent
         name: kafka-broker
         ports:
-          - containerPort: 9092
\ No newline at end of file
+          - containerPort: 9092
diff --git a/manifests/kpi_value_apiservice.yaml b/manifests/kpi_value_apiservice.yaml
index 74eb90f675794f1b451b04af55e191edff58fae5..e4dcb00545ffaa33de39fd29c029780b777ea91f 100644
--- a/manifests/kpi_value_apiservice.yaml
+++ b/manifests/kpi_value_apiservice.yaml
@@ -39,6 +39,9 @@ spec:
           env:
             - name: LOG_LEVEL
               value: "INFO"
+          envFrom:
+            - secretRef:
+                name: kfk-kpi-data
           readinessProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:30020"]
diff --git a/manifests/kpi_value_writerservice.yaml b/manifests/kpi_value_writerservice.yaml
index 8a8e44ec2a571f1290e30a08d1c896a6339cbe46..e21e36f48ba08999f142e8548fed61cd2dfef0cc 100644
--- a/manifests/kpi_value_writerservice.yaml
+++ b/manifests/kpi_value_writerservice.yaml
@@ -39,6 +39,9 @@ spec:
           env:
             - name: LOG_LEVEL
               value: "INFO"
+          envFrom:
+            - secretRef:
+                name: kfk-kpi-data
           readinessProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:30030"]
diff --git a/manifests/telemetryservice.yaml b/manifests/telemetryservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2f9917499a425b95d436ffa8cdb311d29483d2ca
--- /dev/null
+++ b/manifests/telemetryservice.yaml
@@ -0,0 +1,128 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: telemetryservice
+spec:
+  selector:
+    matchLabels:
+      app: telemetryservice
+  #replicas: 1
+  template:
+    metadata:
+      labels:
+        app: telemetryservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+        - name: frontend
+          image: labs.etsi.org:5050/tfs/controller/telemetry-frontend:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 30050
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          envFrom:
+            - secretRef:
+                name: crdb-telemetry
+            - secretRef:
+                name: kfk-kpi-data
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30050"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30050"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+        - name: backend
+          image: labs.etsi.org:5050/tfs/controller/telemetry-backend:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 30060
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          envFrom:
+            - secretRef:
+                name: kfk-kpi-data
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30060"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30060"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: telemetryservice
+  labels:
+    app: telemetryservice
+spec:
+  type: ClusterIP
+  selector:
+    app: telemetryservice
+  ports:
+    - name: frontend-grpc
+      protocol: TCP
+      port: 30050
+      targetPort: 30050
+    - name: backend-grpc
+      protocol: TCP
+      port: 30060
+      targetPort: 30060
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: telemetryservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: telemetryservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/my_deploy.sh b/my_deploy.sh
index c3337ef39f087e19f3455d8a32e5448316f94250..34d2a8977fe2694396a0173f5c3936b6164bc70c 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -181,3 +181,8 @@ export GRAF_EXT_PORT_HTTP="3000"
 # Set the namespace where Apache Kafka will be deployed.
 export KFK_NAMESPACE="kafka"
 
+# Set the port Apache Kafka server will be exposed to.
+export KFK_SERVER_PORT="9092"
+
+# Set the flag to YES to redeploy Apache Kafka; all topics will be lost.
+export KFK_REDEPLOY=""
diff --git a/proto/analytics_frontend.proto b/proto/analytics_frontend.proto
index 096c1ee035ae663359d9f4df1e071d3997a0d351..ace0581db816bee1d0d20746f2b864dce602567b 100644
--- a/proto/analytics_frontend.proto
+++ b/proto/analytics_frontend.proto
@@ -30,21 +30,25 @@ message AnalyzerId {
 }
 
 enum AnalyzerOperationMode {
-  ANALYZEROPERATIONMODE_BATCH     = 0;
-  ANALYZEROPERATIONMODE_STREAMING = 1;
+  ANALYZEROPERATIONMODE_UNSPECIFIED = 0;
+  ANALYZEROPERATIONMODE_BATCH       = 1;
+  ANALYZEROPERATIONMODE_STREAMING   = 2;
 }
 
+// duration field may be added in analyzer... 
 message Analyzer {
-  string                     algorithm_name       = 1; // The algorithm to be executed
-  repeated kpi_manager.KpiId input_kpi_ids        = 2; // The KPI Ids to be processed by the analyzer
-  repeated kpi_manager.KpiId output_kpi_ids       = 3; // The KPI Ids produced by the analyzer
-  AnalyzerOperationMode      operation_mode       = 4; // Operation mode of the analyzer
-
-  // In batch mode...
-  float                      batch_min_duration_s = 5; // ..., min duration to collect before executing batch
-  float                      batch_max_duration_s = 6; // ..., max duration collected to execute the batch
-  uint64                     batch_min_size       = 7; // ..., min number of samples to collect before executing batch
-  uint64                     batch_max_size       = 8; // ..., max number of samples collected to execute the batch
+  AnalyzerId                 analyzer_id          = 1;
+  string                     algorithm_name       = 2;  // The algorithm to be executed
+  float                      duration_s           = 3;  // Terminate the data analytics thread after duration (seconds); 0 = run indefinitely
+  repeated kpi_manager.KpiId input_kpi_ids        = 4;  // The KPI Ids to be processed by the analyzer
+  repeated kpi_manager.KpiId output_kpi_ids       = 5;  // The KPI Ids produced by the analyzer
+  AnalyzerOperationMode      operation_mode       = 6;  // Operation mode of the analyzer
+  map<string, string>        parameters           = 7;  // Dictionary of (key, value) pairs, e.g. (window_size, 10)
+  // In batch mode... 
+  float                      batch_min_duration_s = 8;  // ..., min duration to collect before executing batch
+  float                      batch_max_duration_s = 9;  // ..., max duration collected to execute the batch
+  uint64                     batch_min_size       = 10; // ..., min number of samples to collect before executing batch
+  uint64                     batch_max_size       = 11; // ..., max number of samples collected to execute the batch
 }
 
 message AnalyzerFilter {
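A short sketch of populating the extended Analyzer message from Python; the generated module path and the KpiId field layout are assumptions based on the proto build step in the component Dockerfiles and on kpi_manager.proto:

```python
# Hedged example: field names follow the Analyzer message above; module paths
# assume the usual common/proto code generation (see the backend Dockerfile).
from common.proto.analytics_frontend_pb2 import Analyzer, AnalyzerOperationMode

analyzer = Analyzer()
analyzer.algorithm_name = 'threshold_crossing'       # hypothetical algorithm name
analyzer.duration_s     = 0                          # 0 = run indefinitely
analyzer.operation_mode = AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING
analyzer.input_kpi_ids.add().kpi_id.uuid  = '...-input-kpi-uuid-...'   # KpiId layout assumed
analyzer.output_kpi_ids.add().kpi_id.uuid = '...-output-kpi-uuid-...'
analyzer.parameters['window_size']   = '60 seconds'
analyzer.parameters['window_slider'] = '30 seconds'
```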
diff --git a/proto/telemetry_frontend.proto b/proto/telemetry_frontend.proto
index dbc1e8bf688f9f2df341484c1929e2338c458bbf..614d10cf06cdbb1ff4fba6e51a39286eb5132688 100644
--- a/proto/telemetry_frontend.proto
+++ b/proto/telemetry_frontend.proto
@@ -19,9 +19,9 @@ import "context.proto";
 import "kpi_manager.proto";
 
 service TelemetryFrontendService {
-  rpc StartCollector  (Collector      ) returns (CollectorId  ) {}
-  rpc StopCollector   (CollectorId    ) returns (context.Empty) {}
-  rpc SelectCollectors(CollectorFilter) returns (CollectorList) {}
+  rpc StartCollector   (Collector      ) returns (CollectorId  ) {}
+  rpc StopCollector    (CollectorId    ) returns (context.Empty) {}
+  rpc SelectCollectors (CollectorFilter) returns (CollectorList) {}
 }
 
 message CollectorId {
@@ -29,10 +29,12 @@ message CollectorId {
 }
 
 message Collector {
-  CollectorId       collector_id = 1; // The Collector ID
-  kpi_manager.KpiId kpi_id       = 2; // The KPI Id to be associated to the collected samples
-  float             duration_s   = 3; // Terminate data collection after duration[seconds]; duration==0 means indefinitely
-  float             interval_s   = 4; // Interval between collected samples
+  CollectorId        collector_id = 1; // The Collector ID
+  kpi_manager.KpiId  kpi_id       = 2; // The KPI Id to be associated to the collected samples
+  float              duration_s   = 3; // Terminate data collection after duration[seconds]; duration==0 means indefinitely
+  float              interval_s   = 4; // Interval between collected samples
+  context.Timestamp  start_time   = 5; // Timestamp when the Collector starts execution
+  context.Timestamp  end_time     = 6; // Timestamp when the Collector stops execution
 }
 
 message CollectorFilter {
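Similarly, a sketch of filling the new start_time / end_time fields of Collector; the context.Timestamp layout (a single double field named timestamp) and the generated module path are assumptions:

```python
# Hedged example: module path assumed from the common/proto build step.
import time
from common.proto.telemetry_frontend_pb2 import Collector

collector = Collector()
collector.kpi_id.kpi_id.uuid   = '...-kpi-uuid-...'   # KpiId layout assumed
collector.duration_s           = 30.0                 # stop collection after 30 seconds
collector.interval_s           = 1.0                  # one sample per second
collector.start_time.timestamp = time.time()          # Timestamp layout assumed
collector.end_time.timestamp   = time.time() + collector.duration_s
```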
diff --git a/scripts/run_tests_locally-telemetry-mgtDB.sh b/scripts/run_tests_locally-analytics-DB.sh
similarity index 69%
rename from scripts/run_tests_locally-telemetry-mgtDB.sh
rename to scripts/run_tests_locally-analytics-DB.sh
index 8b68104eaf343b57ec4953334cda37167cca3529..9df5068d6bde361a4a1e73b96990c0d407c88cb4 100755
--- a/scripts/run_tests_locally-telemetry-mgtDB.sh
+++ b/scripts/run_tests_locally-analytics-DB.sh
@@ -17,10 +17,8 @@
 PROJECTDIR=`pwd`
 
 cd $PROJECTDIR/src
-# RCFILE=$PROJECTDIR/coverage/.coveragerc
-# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
-#     kpi_manager/tests/test_unitary.py
-
 RCFILE=$PROJECTDIR/coverage/.coveragerc
-python3 -m pytest --log-cli-level=INFO --verbose \
-    telemetry/database/tests/managementDBtests.py
+CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    analytics/tests/test_analytics_db.py
diff --git a/scripts/run_tests_locally-analytics-frontend.sh b/scripts/run_tests_locally-analytics-frontend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e30d30da623b2d0eee3d925d69a846b4b1f516a3
--- /dev/null
+++ b/scripts/run_tests_locally-analytics-frontend.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    analytics/frontend/tests/test_frontend.py
diff --git a/scripts/run_tests_locally-kpi-manager.sh b/scripts/run_tests_locally-kpi-manager.sh
index a6a24f90db93d56300ac997bd00675c479ef13ae..8a4ce8d95c74657451147078a1d93e891dfc2ac8 100755
--- a/scripts/run_tests_locally-kpi-manager.sh
+++ b/scripts/run_tests_locally-kpi-manager.sh
@@ -24,7 +24,7 @@ cd $PROJECTDIR/src
 # python3 kpi_manager/tests/test_unitary.py
 
 RCFILE=$PROJECTDIR/coverage/.coveragerc
-CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
+CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace ${CRDB_NAMESPACE} -o 'jsonpath={.spec.clusterIP}')
 export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
     kpi_manager/tests/test_kpi_manager.py
diff --git a/scripts/run_tests_locally-kpi-value-API.sh b/scripts/run_tests_locally-kpi-value-API.sh
index 8dfbfb16237634519dcae2fcc34f850a5188c1e7..3953d2a89c6fbe2bd3546e648246b9b018e5fdb0 100755
--- a/scripts/run_tests_locally-kpi-value-API.sh
+++ b/scripts/run_tests_locally-kpi-value-API.sh
@@ -19,7 +19,8 @@ PROJECTDIR=`pwd`
 cd $PROJECTDIR/src
 
 RCFILE=$PROJECTDIR/coverage/.coveragerc
-
+KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+KFK_SERVER_ADDRESS=${KAFKA_IP}:9092
 # helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG -o log_cli=true --verbose \
     kpi_value_api/tests/test_kpi_value_api.py
diff --git a/scripts/run_tests_locally-telemetry-DB.sh b/scripts/run_tests_locally-telemetry-DB.sh
index bb1c48b76440c00b398875a8f704c2a82ba4ab50..4b9a417603cc42a4e7e8b19c7394cc38633817fa 100755
--- a/scripts/run_tests_locally-telemetry-DB.sh
+++ b/scripts/run_tests_locally-telemetry-DB.sh
@@ -22,5 +22,5 @@ cd $PROJECTDIR/src
 #     kpi_manager/tests/test_unitary.py
 
 RCFILE=$PROJECTDIR/coverage/.coveragerc
-python3 -m pytest --log-cli-level=INFO --verbose \
-    telemetry/database/tests/telemetryDBtests.py
+python3 -m pytest --log-level=DEBUG --log-cli-level=debug --verbose \
+    telemetry/tests/test_telemetryDB.py
diff --git a/scripts/run_tests_locally-telemetry-backend.sh b/scripts/run_tests_locally-telemetry-backend.sh
index 9cf404ffcef6c99b261f81eb0c6b910dd60845e5..79db05fcf1259365e8a909ee99395eb59dfb9437 100755
--- a/scripts/run_tests_locally-telemetry-backend.sh
+++ b/scripts/run_tests_locally-telemetry-backend.sh
@@ -24,5 +24,5 @@ cd $PROJECTDIR/src
 # python3 kpi_manager/tests/test_unitary.py
 
 RCFILE=$PROJECTDIR/coverage/.coveragerc
-python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \
-    telemetry/backend/tests/testTelemetryBackend.py
+python3 -m pytest --log-level=INFO --log-cli-level=debug --verbose \
+    telemetry/backend/tests/test_TelemetryBackend.py
diff --git a/scripts/run_tests_locally-telemetry-frontend.sh b/scripts/run_tests_locally-telemetry-frontend.sh
index 7652ccb583268285dcd2fcf3090b717dc18e4fc3..a2a1de52340cac527d4d1c446c76740d38ce7783 100755
--- a/scripts/run_tests_locally-telemetry-frontend.sh
+++ b/scripts/run_tests_locally-telemetry-frontend.sh
@@ -24,5 +24,5 @@ cd $PROJECTDIR/src
 # python3 kpi_manager/tests/test_unitary.py
 
 RCFILE=$PROJECTDIR/coverage/.coveragerc
-python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
     telemetry/frontend/tests/test_frontend.py
diff --git a/scripts/show_logs_analytics_backend.sh b/scripts/show_logs_analytics_backend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..afb58567ca5ab250da48d2cfffa2c56abdff2db2
--- /dev/null
+++ b/scripts/show_logs_analytics_backend.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/analyticsservice -c backend
diff --git a/scripts/show_logs_analytics_frontend.sh b/scripts/show_logs_analytics_frontend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6d3fae10b366f0082d3a393c224e8f1cb7830721
--- /dev/null
+++ b/scripts/show_logs_analytics_frontend.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/analyticsservice -c frontend
diff --git a/src/analytics/README.md b/src/analytics/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9663e5321ace6866491b90553553d9ccbf5793a1
--- /dev/null
+++ b/src/analytics/README.md
@@ -0,0 +1,4 @@
+# How to locally run and test the Analytics service (to be added soon)
+
+### Pre-requisites
+The following requirements should be fulfilled before executing the Analytics service.
diff --git a/src/telemetry/database/tests/__init__.py b/src/analytics/__init__.py
similarity index 94%
rename from src/telemetry/database/tests/__init__.py
rename to src/analytics/__init__.py
index 839e45e3b646bc60de7edd81fcfb91b7b38feadf..bbfc943b68af13a11e562abbc8680ade71db8f02 100644
--- a/src/telemetry/database/tests/__init__.py
+++ b/src/analytics/__init__.py
@@ -10,4 +10,4 @@
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
-# limitations under the License.
\ No newline at end of file
+# limitations under the License.
diff --git a/src/analytics/backend/Dockerfile b/src/analytics/backend/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..17adcd3ab1df5704cc7ef0c5a19b3cfb1539ee22
--- /dev/null
+++ b/src/analytics/backend/Dockerfile
@@ -0,0 +1,69 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/analytics/backend
+WORKDIR /var/teraflow/analytics/backend
+COPY src/analytics/backend/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/analytics/__init__.py analytics/__init__.py
+COPY src/analytics/backend/. analytics/backend/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "analytics.backend.service"]
diff --git a/src/analytics/backend/__init__.py b/src/analytics/backend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/analytics/backend/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/telemetry/database/tests/managementDBtests.py b/src/analytics/backend/requirements.in
similarity index 70%
rename from src/telemetry/database/tests/managementDBtests.py
rename to src/analytics/backend/requirements.in
index 24138abe42be742bd9b16d7840343f9d7c7fe133..9df678fe819f33d479b8f5090ca9ac4eb1f4047c 100644
--- a/src/telemetry/database/tests/managementDBtests.py
+++ b/src/analytics/backend/requirements.in
@@ -12,11 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-from telemetry.database.managementDB import managementDB
-from telemetry.database.tests.messages import create_collector_model_object
-
-
-def test_add_row_to_db():
-    managementDBobj = managementDB()
-    managementDBobj.add_row_to_db(create_collector_model_object())
\ No newline at end of file
+pyspark==3.5.2
+confluent-kafka==2.3.*
diff --git a/src/analytics/backend/service/AnalyticsBackendService.py b/src/analytics/backend/service/AnalyticsBackendService.py
new file mode 100755
index 0000000000000000000000000000000000000000..595603567fe537d9f7b33224cba0fe016a439631
--- /dev/null
+++ b/src/analytics/backend/service/AnalyticsBackendService.py
@@ -0,0 +1,132 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+import logging
+import threading
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from analytics.backend.service.SparkStreaming import SparkStreamer
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+from confluent_kafka import Consumer as KafkaConsumer
+from confluent_kafka import KafkaError
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+
+
+LOGGER = logging.getLogger(__name__)
+
+class AnalyticsBackendService(GenericGrpcService):
+    """
+    Listens for analyzer start/stop requests on the Kafka ANALYTICS_REQUEST topic
+    and manages the corresponding Spark streaming threads.
+    """
+    def __init__(self, cls_name : str = __name__) -> None:
+        LOGGER.info('Init AnalyticsBackendService')
+        port = get_service_port_grpc(ServiceNameEnum.ANALYTICSBACKEND)
+        super().__init__(port, cls_name=cls_name)
+        self.running_threads = {}       # To keep track of all running analyzers 
+        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(),
+                                            'group.id'           : 'analytics-frontend',
+                                            'auto.offset.reset'  : 'latest'})
+
+    def StartSparkStreamer(self, analyzer_uuid, analyzer):
+        kpi_list      = analyzer['input_kpis'] 
+        oper_list     = [s.replace('_value', '') for s in list(analyzer["thresholds"].keys())]  # TODO: update this line...
+        thresholds    = analyzer['thresholds']
+        window_size   = analyzer['window_size']
+        window_slider = analyzer['window_slider']
+        print ("Received parameters: {:} - {:} - {:} - {:} - {:}".format(
+            kpi_list, oper_list, thresholds, window_size, window_slider))
+        LOGGER.debug ("Received parameters: {:} - {:} - {:} - {:} - {:}".format(
+            kpi_list, oper_list, thresholds, window_size, window_slider))
+        try:
+            stop_event = threading.Event()
+            thread = threading.Thread(target=SparkStreamer, 
+                            args=(analyzer_uuid, kpi_list, oper_list, thresholds, stop_event,
+                                  window_size, window_slider, None ))
+            self.running_threads[analyzer_uuid] = (thread, stop_event)
+            thread.start()
+            print      ("Initiated Analyzer backend: {:}".format(analyzer_uuid))
+            LOGGER.info("Initiated Analyzer backend: {:}".format(analyzer_uuid))
+            return True
+        except Exception as e:
+            print       ("Failed to initiate Analyzer backend: {:}".format(e))
+            LOGGER.error("Failed to initiate Analyzer backend: {:}".format(e))
+            return False
+
+    def StopRequestListener(self, threadInfo: tuple):
+        try:
+            thread, stop_event = threadInfo
+            stop_event.set()
+            thread.join()
+            print      ("Terminating Analytics backend RequestListener")
+            LOGGER.info("Terminating Analytics backend RequestListener")
+            return True
+        except Exception as e:
+            print       ("Failed to terminate analytics backend {:}".format(e))
+            LOGGER.error("Failed to terminate analytics backend {:}".format(e))
+            return False
+
+    def install_services(self):
+        stop_event = threading.Event()
+        thread = threading.Thread(target=self.RequestListener,
+                                  args=(stop_event,) )
+        thread.start()
+        return (thread, stop_event)
+
+    def RequestListener(self, stop_event):
+        """
+        listener for requests on Kafka topic.
+        """
+        consumer = self.kafka_consumer
+        consumer.subscribe([KafkaTopic.ANALYTICS_REQUEST.value])
+        while not stop_event.is_set():
+            receive_msg = consumer.poll(2.0)
+            if receive_msg is None:
+                continue
+            elif receive_msg.error():
+                if receive_msg.error().code() == KafkaError._PARTITION_EOF:
+                    continue
+                else:
+                    print("Consumer error: {}".format(receive_msg.error()))
+                    break
+            analyzer    = json.loads(receive_msg.value().decode('utf-8'))
+            analyzer_uuid = receive_msg.key().decode('utf-8')
+            LOGGER.debug('Received Analyzer: {:} - {:}'.format(analyzer_uuid, analyzer))
+            print       ('Received Analyzer: {:} - {:}'.format(analyzer_uuid, analyzer))
+
+            if analyzer["algo_name"] is None and analyzer["oper_mode"] is None:
+                self.TerminateAnalyzerBackend(analyzer_uuid)
+            else:
+                self.StartSparkStreamer(analyzer_uuid, analyzer)
+        LOGGER.debug("Stop Event activated. Terminating...")
+        print       ("Stop Event activated. Terminating...")
+
+    def TerminateAnalyzerBackend(self, analyzer_uuid):
+        if analyzer_uuid in self.running_threads:
+            try:
+                thread, stop_event = self.running_threads[analyzer_uuid]
+                stop_event.set()
+                thread.join()
+                del self.running_threads[analyzer_uuid]
+                print      ("Terminating backend (by TerminateBackend): Analyzer Id: {:}".format(analyzer_uuid))
+                LOGGER.info("Terminating backend (by TerminateBackend): Analyzer Id: {:}".format(analyzer_uuid))
+                return True
+            except Exception as e:
+                LOGGER.error("Failed to terminate. Analyzer Id: {:} - ERROR: {:}".format(analyzer_uuid, e))
+                return False
+        else:
+            print         ("Analyzer not found in active collectors. Analyzer Id: {:}".format(analyzer_uuid))
+            LOGGER.warning("Analyzer not found in active collectors: Analyzer Id: {:}".format(analyzer_uuid))           
+            # generate confirmation towards frontend
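For reference, a hedged sketch of the kind of message RequestListener() above consumes from the ANALYTICS_REQUEST topic: a JSON value keyed by the analyzer UUID, with field names taken from the handler code (the concrete values are illustrative only):

```python
import json
from confluent_kafka import Producer
from common.tools.kafka.Variables import KafkaConfig, KafkaTopic

analyzer_uuid = '1234-abcd-...'                 # hypothetical analyzer identifier
request = {
    'algo_name'    : 'threshold_crossing',      # None in both algo_name and oper_mode means "terminate"
    'oper_mode'    : 2,                         # streaming
    'input_kpis'   : ['kpi-uuid-1', 'kpi-uuid-2'],
    'thresholds'   : {'avg_value': [1.0, 5.0]}, # (fail, raise) thresholds per aggregate
    'window_size'  : '60 seconds',
    'window_slider': '30 seconds',
}
producer = Producer({'bootstrap.servers': KafkaConfig.get_kafka_address()})
producer.produce(KafkaTopic.ANALYTICS_REQUEST.value,
                 key=analyzer_uuid, value=json.dumps(request))
producer.flush()
```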
diff --git a/src/analytics/backend/service/SparkStreaming.py b/src/analytics/backend/service/SparkStreaming.py
new file mode 100644
index 0000000000000000000000000000000000000000..96e1aa05d898ffdd23c533b74ee87fbf03f54576
--- /dev/null
+++ b/src/analytics/backend/service/SparkStreaming.py
@@ -0,0 +1,154 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging, time
+from pyspark.sql                  import SparkSession
+from pyspark.sql.types            import StructType, StructField, StringType, DoubleType, TimestampType
+from pyspark.sql.functions        import from_json, col, window, avg, min, max, first, last, stddev, when, round
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+
+LOGGER = logging.getLogger(__name__)
+
+def DefiningSparkSession():
+    # Create a Spark session with a specific Spark version (3.5.0)
+    return SparkSession.builder \
+            .appName("Analytics") \
+            .config("spark.sql.streaming.forceDeleteTempCheckpointLocation", "true") \
+            .config("spark.jars.packages", "org.apache.spark:spark-sql-kafka-0-10_2.12:3.5.0") \
+            .getOrCreate()
+
+def SettingKafkaConsumerParams():   # TODO:  create get_kafka_consumer() in common with inputs (bootstrap server, subscribe, startingOffset and failOnDataLoss with default values)
+    return {
+            # "kafka.bootstrap.servers": '127.0.0.1:9092',
+            "kafka.bootstrap.servers": KafkaConfig.get_kafka_address(),
+            "subscribe"              : KafkaTopic.VALUE.value,
+            "startingOffsets"        : 'latest',
+            "failOnDataLoss"         : 'false'              # Optional: Set to "true" to fail the query on data loss
+        }
+
+def DefiningRequestSchema():
+    return StructType([
+            StructField("time_stamp" ,  StringType()  , True),
+            StructField("kpi_id"     ,  StringType()  , True),
+            StructField("kpi_value"  ,  DoubleType()  , True)
+        ])
+
+def GetAggregations(oper_list):
+    # Define the possible aggregation functions
+    agg_functions = {
+        'avg'  :  round(avg    ("kpi_value"), 3) .alias("avg_value"),
+        'min'  :  round(min    ("kpi_value"), 3) .alias("min_value"),
+        'max'  :  round(max    ("kpi_value"), 3) .alias("max_value"),
+        'first':  round(first  ("kpi_value"), 3) .alias("first_value"),
+        'last' :  round(last   ("kpi_value"), 3) .alias("last_value"),
+        'stdev':  round(stddev ("kpi_value"), 3) .alias("stdev_value")
+    }
+    return [agg_functions[op] for op in oper_list if op in agg_functions]   # Filter and return only the selected aggregations
+
+def ApplyThresholds(aggregated_df, thresholds):
+    # Apply thresholds (TH-Fail and TH-RAISE) based on the thresholds dictionary on the aggregated DataFrame.
+    
+    # Loop through each column name and its associated thresholds
+    for col_name, (fail_th, raise_th) in thresholds.items():
+        # Apply TH-Fail condition (if column value is less than the fail threshold)
+        aggregated_df = aggregated_df.withColumn(
+            f"{col_name}_THRESHOLD_FAIL", 
+            when(col(col_name) < fail_th, True).otherwise(False)
+        )
+        # Apply TH-RAISE condition (if column value is greater than the raise threshold)
+        aggregated_df = aggregated_df.withColumn(
+            f"{col_name}_THRESHOLD_RAISE", 
+            when(col(col_name) > raise_th, True).otherwise(False)
+        )
+    return aggregated_df
+
+def SparkStreamer(key, kpi_list, oper_list, thresholds, stop_event,
+                  window_size=None, win_slide_duration=None, time_stamp_col=None):
+    """
+    Method to perform Spark operations on a Kafka stream.
+    NOTE: the Kafka topic to be processed should have at least one row before initiating the Spark session.
+    """
+    kafka_consumer_params = SettingKafkaConsumerParams()         # Define the Kafka consumer parameters
+    schema                = DefiningRequestSchema()              # Define the schema for the incoming JSON data
+    spark                 = DefiningSparkSession()               # Define the spark session with app name and spark version
+    
+    # extra options default assignment
+    if window_size        is None: window_size        = "60 seconds"    # default
+    if win_slide_duration is None: win_slide_duration = "30 seconds"    # default
+    if time_stamp_col     is None: time_stamp_col     = "time_stamp"    # default
+    
+    try:
+        # Read data from Kafka
+        raw_stream_data = spark \
+            .readStream \
+            .format("kafka") \
+            .options(**kafka_consumer_params) \
+            .load()
+
+        # Convert the value column from Kafka to a string
+        stream_data          = raw_stream_data.selectExpr("CAST(value AS STRING)")
+        # Parse the JSON string into a DataFrame with the defined schema
+        parsed_stream_data   = stream_data.withColumn("parsed_value", from_json(col("value"), schema))
+        # Select the parsed fields
+        final_stream_data    = parsed_stream_data.select("parsed_value.*")
+        # Convert the time_stamp to proper timestamp (assuming it's in ISO format)
+        final_stream_data    = final_stream_data.withColumn(time_stamp_col, col(time_stamp_col).cast(TimestampType()))
+        # Filter the stream to only include rows where the kpi_id is in the kpi_list
+        filtered_stream_data = final_stream_data.filter(col("kpi_id").isin(kpi_list))
+         # Define a window for aggregation
+        windowed_stream_data = filtered_stream_data \
+                                .groupBy(
+                                    window( col(time_stamp_col), 
+                                           window_size, slideDuration=win_slide_duration
+                                           ),
+                                    col("kpi_id")
+                                ) \
+                                .agg(*GetAggregations(oper_list))
+        # Apply thresholds to the aggregated data
+        thresholded_stream_data = ApplyThresholds(windowed_stream_data, thresholds)
+
+        # --- This will write output on console: FOR TESTING PURPOSES
+        # Start the Spark streaming query
+        # query = thresholded_stream_data \
+        #     .writeStream \
+        #     .outputMode("update") \
+        #     .format("console") 
+
+        # --- This will write output to Kafka: ACTUAL IMPLEMENTATION
+        query = thresholded_stream_data \
+            .selectExpr(f"'{key}' AS key", "to_json(struct(*)) AS value") \
+            .writeStream \
+            .format("kafka") \
+            .option("kafka.bootstrap.servers", KafkaConfig.get_kafka_address()) \
+            .option("topic",                   KafkaTopic.ANALYTICS_RESPONSE.value) \
+            .option("checkpointLocation",      "analytics/.spark/checkpoint") \
+            .outputMode("update")
+
+        # Start the query execution
+        queryHandler = query.start()
+
+        # Loop to check for stop event flag. To be set by stop collector method.
+        while True:
+            if stop_event.is_set():
+                LOGGER.debug("Stop Event activated. Terminating in 5 seconds...")
+                print       ("Stop Event activated. Terminating in 5 seconds...")
+                time.sleep(5)
+                queryHandler.stop()
+                break
+            time.sleep(5)
+
+    except Exception as e:
+        print("Error in Spark streaming process: {:}".format(e))
+        LOGGER.debug("Error in Spark streaming process: {:}".format(e))
diff --git a/src/analytics/backend/service/__init__.py b/src/analytics/backend/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/analytics/backend/service/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/analytics/backend/service/__main__.py b/src/analytics/backend/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c4c36b7c7bd952164bf9e48a45e22fb00575564
--- /dev/null
+++ b/src/analytics/backend/service/__main__.py
@@ -0,0 +1,56 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from prometheus_client import start_http_server
+from common.Settings import get_log_level, get_metrics_port
+from .AnalyticsBackendService import AnalyticsBackendService
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
+
+    grpc_service = AnalyticsBackendService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.info('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/analytics/backend/tests/__init__.py b/src/analytics/backend/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/analytics/backend/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/analytics/backend/tests/messages.py b/src/analytics/backend/tests/messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..9acd6ad9dffe4a5b10b107a6923ed85170ee141f
--- /dev/null
+++ b/src/analytics/backend/tests/messages.py
@@ -0,0 +1,34 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def get_kpi_id_list():
+    return ["6e22f180-ba28-4641-b190-2287bf448888", "1e22f180-ba28-4641-b190-2287bf446666"]
+
+def get_operation_list():
+    return [ 'avg', 'max' ]     # possibilities ['avg', 'min', 'max', 'first', 'last', 'stdev']
+
+def get_threshold_dict():
+    threshold_dict = {
+        'avg_value'    : (20, 30),
+        'min_value'    : (00, 10), 
+        'max_value'    : (45, 50),
+        'first_value'  : (00, 10),
+        'last_value'   : (40, 50),
+        'stdev_value'  : (00, 10),
+    }
+    # Filter threshold_dict based on the operation_list
+    return {
+        op + '_value': threshold_dict[op+'_value'] for op in get_operation_list() if op + '_value' in threshold_dict
+    }
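+
+# Example (illustrative): with get_operation_list() == ['avg', 'max'], the dictionary
+# returned above is {'avg_value': (20, 30), 'max_value': (45, 50)}.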
diff --git a/src/analytics/backend/tests/test_backend.py b/src/analytics/backend/tests/test_backend.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f40faba94ef7081db609116e8fd869e3d119a24
--- /dev/null
+++ b/src/analytics/backend/tests/test_backend.py
@@ -0,0 +1,64 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import logging
+import threading
+from common.tools.kafka.Variables import KafkaTopic
+from analytics.backend.service.AnalyticsBackendService import AnalyticsBackendService
+from analytics.backend.tests.messages import get_kpi_id_list, get_operation_list, get_threshold_dict
+
+LOGGER = logging.getLogger(__name__)
+
+
+###########################
+# Tests Implementation of Analytics Backend
+###########################
+
+# --- "test_validate_kafka_topics" should be run before the functionality tests ---
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+# def test_StartRequestListener():
+#     LOGGER.info('test_RunRequestListener')
+#     AnalyticsBackendServiceObj = AnalyticsBackendService()
+#     response = AnalyticsBackendServiceObj.StartRequestListener() # response is Tuple (thread, stop_event)
+#     LOGGER.debug(str(response)) 
+#     assert isinstance(response, tuple)
+
+# To test START and STOP communication together
+def test_StopRequestListener():
+    LOGGER.info('test_StopRequestListener')
+    LOGGER.info('Initiating StartRequestListener...')
+    AnalyticsBackendServiceObj = AnalyticsBackendService()
+    response_thread = AnalyticsBackendServiceObj.StartRequestListener() # response is Tuple (thread, stop_event)
+    # LOGGER.debug(str(response_thread))
+    time.sleep(10)
+    LOGGER.info('Initiating StopRequestListener...')
+    # Reuse the same AnalyticsBackendService instance that started the listener
+    response = AnalyticsBackendServiceObj.StopRequestListener(response_thread)
+    LOGGER.debug(str(response)) 
+    assert isinstance(response, bool)
+
+# To independently test the SparkListener functionality
+# def test_SparkListener():
+#     LOGGER.info('test_RunRequestListener')
+#     AnalyticsBackendServiceObj = AnalyticsBackendService()
+#     response = AnalyticsBackendServiceObj.RunSparkStreamer(
+#         get_kpi_id_list(), get_operation_list(), get_threshold_dict()
+#         )
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, bool)
diff --git a/src/analytics/database/AnalyzerEngine.py b/src/analytics/database/AnalyzerEngine.py
new file mode 100644
index 0000000000000000000000000000000000000000..9294e09966ef9e13c9cfa3cab590e5d0c8b6a80e
--- /dev/null
+++ b/src/analytics/database/AnalyzerEngine.py
@@ -0,0 +1,40 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, sqlalchemy
+from common.Settings import get_setting
+
+LOGGER = logging.getLogger(__name__)
+CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
+
+class AnalyzerEngine:
+    @staticmethod
+    def get_engine() -> sqlalchemy.engine.Engine:
+        crdb_uri = get_setting('CRDB_URI', default=None)
+        if crdb_uri is None:
+            CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE')
+            CRDB_SQL_PORT  = get_setting('CRDB_SQL_PORT')
+            CRDB_DATABASE  = "tfs-analyzer"             # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT')
+            CRDB_USERNAME  = get_setting('CRDB_USERNAME')
+            CRDB_PASSWORD  = get_setting('CRDB_PASSWORD')
+            CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
+            crdb_uri = CRDB_URI_TEMPLATE.format(
+                CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+        try:
+            engine = sqlalchemy.create_engine(crdb_uri, echo=False)
+            LOGGER.info('AnalyzerDB initialized with DB URL: {:}'.format(crdb_uri))
+        except: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
+            return None # type: ignore
+        return engine
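+
+# Example (illustrative, assumed settings): with CRDB_NAMESPACE='crdb', CRDB_SQL_PORT='26257',
+# CRDB_USERNAME='tfs', CRDB_PASSWORD='tfs123' and CRDB_SSLMODE='require', the template above yields
+#   cockroachdb://tfs:tfs123@cockroachdb-public.crdb.svc.cluster.local:26257/tfs-analyzer?sslmode=require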
diff --git a/src/analytics/database/AnalyzerModel.py b/src/analytics/database/AnalyzerModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..c33e396e06a8dce96a86951a64aa59b510931dfe
--- /dev/null
+++ b/src/analytics/database/AnalyzerModel.py
@@ -0,0 +1,106 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import enum
+
+from sqlalchemy     import Column, String, Float, Enum, BigInteger, JSON
+from sqlalchemy.orm import registry
+from common.proto   import analytics_frontend_pb2
+from common.proto   import kpi_manager_pb2
+
+from sqlalchemy.dialects.postgresql import UUID, ARRAY
+
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+# Create a base class for declarative models
+Base = registry().generate_base()
+
+class AnalyzerOperationMode (enum.Enum):
+    BATCH     = analytics_frontend_pb2.AnalyzerOperationMode.ANALYZEROPERATIONMODE_BATCH
+    STREAMING = analytics_frontend_pb2.AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING
+
+class Analyzer(Base):
+    __tablename__ = 'analyzer'
+
+    analyzer_id           = Column( UUID(as_uuid=False)        , primary_key=True)
+    algorithm_name        = Column( String                     , nullable=False  )
+    input_kpi_ids         = Column( ARRAY(UUID(as_uuid=False)) , nullable=False  )
+    output_kpi_ids        = Column( ARRAY(UUID(as_uuid=False)) , nullable=False  )
+    operation_mode        = Column( Enum(AnalyzerOperationMode), nullable=False  )
+    parameters            = Column( JSON                       , nullable=True   )
+    batch_min_duration_s  = Column( Float                      , nullable=False  )
+    batch_max_duration_s  = Column( Float                      , nullable=False  )
+    batch_min_size        = Column( BigInteger                 , nullable=False  )
+    batch_max_size        = Column( BigInteger                 , nullable=False  )
+
+    # helps in logging the information
+    def __repr__(self):
+            return (f"<Analyzer(analyzer_id='{self.analyzer_id}'       , algorithm_name='{self.algorithm_name}', "
+                    f"input_kpi_ids={self.input_kpi_ids}               , output_kpi_ids={self.output_kpi_ids}, "
+                    f"operation_mode='{self.operation_mode}'           , parameters={self.parameters}, "
+                    f"batch_min_duration_s={self.batch_min_duration_s} , batch_max_duration_s={self.batch_max_duration_s}, "
+                    f"batch_min_size={self.batch_min_size}             , batch_max_size={self.batch_max_size})>")
+
+
+    @classmethod
+    def ConvertAnalyzerToRow(cls, request):
+        """
+        Create an instance of Analyzer table rows from a request object.
+        Args:    request: The request object containing analyzer gRPC message.
+        Returns: A row (an instance of Analyzer table) initialized with content of the request.
+        """
+        return cls(
+            analyzer_id          = request.analyzer_id.analyzer_id.uuid,
+            algorithm_name       = request.algorithm_name,
+            input_kpi_ids        = [k.kpi_id.uuid for k in request.input_kpi_ids],
+            output_kpi_ids       = [k.kpi_id.uuid for k in request.output_kpi_ids],
+            operation_mode       = AnalyzerOperationMode(request.operation_mode),   # converts integer to corresponding Enum class member
+            parameters           = dict(request.parameters),
+            batch_min_duration_s = request.batch_min_duration_s,
+            batch_max_duration_s = request.batch_max_duration_s,
+            batch_min_size       = request.batch_min_size,
+            batch_max_size       = request.batch_max_size
+        )
+
+    @classmethod
+    def ConvertRowToAnalyzer(cls, row):
+        """
+        Create and return an Analyzer gRPC message initialized with the content of a row.
+        Args: row: The Analyzer table instance (row) containing the data.
+        Returns:   An Analyzer gRPC message initialized with the content of the row.
+        """
+        # Create an instance of the Analyzer message
+        response                              = analytics_frontend_pb2.Analyzer()
+        response.analyzer_id.analyzer_id.uuid = row.analyzer_id
+        response.algorithm_name               = row.algorithm_name
+        response.operation_mode               = row.operation_mode.value
+        response.parameters.update(row.parameters)
+        
+        for input_kpi_id in row.input_kpi_ids:
+            _kpi_id = kpi_manager_pb2.KpiId()
+            _kpi_id.kpi_id.uuid = input_kpi_id
+            response.input_kpi_ids.append(_kpi_id)
+        for output_kpi_id in row.output_kpi_ids:
+            _kpi_id = kpi_manager_pb2.KpiId()
+            _kpi_id.kpi_id.uuid = output_kpi_id
+            response.output_kpi_ids.append(_kpi_id)
+
+        response.batch_min_duration_s = row.batch_min_duration_s
+        response.batch_max_duration_s = row.batch_max_duration_s
+        response.batch_min_size       = row.batch_min_size
+        response.batch_max_size       = row.batch_max_size
+        return response
diff --git a/src/analytics/database/Analyzer_DB.py b/src/analytics/database/Analyzer_DB.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ba68989a066e4638adc12e65289ed50b740731d
--- /dev/null
+++ b/src/analytics/database/Analyzer_DB.py
@@ -0,0 +1,150 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sqlalchemy_utils
+
+from sqlalchemy     import inspect, or_
+from sqlalchemy.orm import sessionmaker
+
+from analytics.database.AnalyzerModel         import Analyzer as AnalyzerModel
+from analytics.database.AnalyzerEngine        import AnalyzerEngine
+from common.method_wrappers.ServiceExceptions import (OperationFailedException, AlreadyExistsException)
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "tfs_analyzer"        # TODO: export name from enviornment variable
+
+class AnalyzerDB:
+    def __init__(self):
+        self.db_engine = AnalyzerEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            raise Exception('Unable to get SQLAlchemy DB Engine')
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
+    def create_database(self):
+        if not sqlalchemy_utils.database_exists(self.db_engine.url):
+            LOGGER.debug("Database created. {:}".format(self.db_engine.url))
+            sqlalchemy_utils.create_database(self.db_engine.url)
+
+    def drop_database(self) -> None:
+        if sqlalchemy_utils.database_exists(self.db_engine.url):
+            sqlalchemy_utils.drop_database(self.db_engine.url)
+
+    def create_tables(self):
+        try:
+            AnalyzerModel.metadata.create_all(self.db_engine)     # type: ignore
+            LOGGER.debug("Tables created in the database: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.debug("Tables cannot be created in the database. {:s}".format(str(e)))
+            raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)])
+
+    def verify_tables(self):
+        try:
+            inspect_object = inspect(self.db_engine)
+            if(inspect_object.has_table('analyzer', None)):
+                LOGGER.info("Table exists in DB: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
+
+# ----------------- CRUD OPERATIONS ---------------------
+
+    def add_row_to_db(self, row):
+        session = self.Session()
+        try:
+            session.add(row)
+            session.commit()
+            LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.")
+            return True
+        except Exception as e:
+            session.rollback()
+            if "psycopg2.errors.UniqueViolation" in str(e):
+                LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}")
+                raise AlreadyExistsException(row.__class__.__name__, row,
+                                             extra_details=["Unique key voilation: {:}".format(e)] )
+            else:
+                LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
+                raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
+        finally:
+            session.close()
+    
+    def search_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if entity:
+                # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}")
+                return entity
+            else:
+                LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}")
+                print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search))
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}")
+            raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)])
+        finally:
+            session.close()
+    
+    def delete_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            record = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if record:
+                session.delete(record)
+                session.commit()
+                LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search)
+            else:
+                LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search)
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e)
+            raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
+        finally:
+            session.close()
+    
+    def select_with_filter(self, model, filter_object):
+        session = self.Session()
+        try:
+            query = session.query(AnalyzerModel)
+            
+            # Apply filters based on the filter_object
+            if filter_object.analyzer_id:
+                query = query.filter(AnalyzerModel.analyzer_id.in_([a.analyzer_id.uuid for a in filter_object.analyzer_id]))
+
+            if filter_object.algorithm_names:
+                query = query.filter(AnalyzerModel.algorithm_name.in_(filter_object.algorithm_names))
+
+            if filter_object.input_kpi_ids:
+                input_kpi_uuids = [k.kpi_id.uuid for k in filter_object.input_kpi_ids]
+                query = query.filter(AnalyzerModel.input_kpi_ids.op('&&')(input_kpi_uuids))
+
+            if filter_object.output_kpi_ids:
+                output_kpi_uuids = [k.kpi_id.uuid for k in filter_object.output_kpi_ids]
+                query = query.filter(AnalyzerModel.output_kpi_ids.op('&&')(output_kpi_uuids))
+
+            # Execute the query and fetch all matching rows
+            result = query.all()
+            if result:
+                LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") #  - Results: {result}
+            else:
+                LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filter_object}")
+            return result
+        except Exception as e:
+            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}")
+            raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)])
+        finally:
+            session.close()
diff --git a/src/telemetry/frontend/tests/__init__.py b/src/analytics/database/__init__.py
similarity index 100%
rename from src/telemetry/frontend/tests/__init__.py
rename to src/analytics/database/__init__.py
diff --git a/src/analytics/frontend/Dockerfile b/src/analytics/frontend/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..10499713f318a23e1aeab49c96e8163a5ec147fa
--- /dev/null
+++ b/src/analytics/frontend/Dockerfile
@@ -0,0 +1,70 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/analytics/frontend
+WORKDIR /var/teraflow/analytics/frontend
+COPY src/analytics/frontend/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/analytics/__init__.py analytics/__init__.py
+COPY src/analytics/frontend/. analytics/frontend/
+COPY src/analytics/database/. analytics/database/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "analytics.frontend.service"]
diff --git a/src/analytics/frontend/__init__.py b/src/analytics/frontend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/analytics/frontend/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/analytics/frontend/client/AnalyticsFrontendClient.py b/src/analytics/frontend/client/AnalyticsFrontendClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..90e95d661d46f24ae5ffaeb7bcfa19b7e1f36526
--- /dev/null
+++ b/src/analytics/frontend/client/AnalyticsFrontendClient.py
@@ -0,0 +1,68 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants                         import ServiceNameEnum
+from common.proto.context_pb2                 import Empty
+from common.proto.analytics_frontend_pb2_grpc import AnalyticsFrontendServiceStub
+from common.proto.analytics_frontend_pb2      import AnalyzerId, Analyzer, AnalyzerFilter, AnalyzerList
+from common.Settings                          import get_service_host, get_service_port_grpc
+from common.tools.grpc.Tools                  import grpc_message_to_json_string
+from common.tools.client.RetryDecorator       import retry, delay_exponential
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 10
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class AnalyticsFrontendClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.ANALYTICSFRONTEND)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.ANALYTICSFRONTEND)
+        self.endpoint     = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
+        self.channel      = None
+        self.stub         = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel      = grpc.insecure_channel(self.endpoint)
+        self.stub         = AnalyticsFrontendServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel      = None
+        self.stub         = None
+
+    @RETRY_DECORATOR
+    def StartAnalyzer (self, request: Analyzer) -> AnalyzerId: #type: ignore
+        LOGGER.debug('StartAnalyzer: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.StartAnalyzer(request)
+        LOGGER.debug('StartAnalyzer result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def StopAnalyzer(self, request : AnalyzerId) -> Empty: # type: ignore
+        LOGGER.debug('StopAnalyzer: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.StopAnalyzer(request)
+        LOGGER.debug('StopAnalyzer result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+    
+    @RETRY_DECORATOR
+    def SelectAnalyzers(self, request : AnalyzerFilter) -> AnalyzerList: # type: ignore
+        LOGGER.debug('SelectAnalyzers: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectAnalyzers(request)
+        LOGGER.debug('SelectAnalyzers result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
diff --git a/src/analytics/frontend/client/__init__.py b/src/analytics/frontend/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/analytics/frontend/client/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/analytics/frontend/requirements.in b/src/analytics/frontend/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..d81b9ddbeafeff94c830d48ca5594e775b9ce240
--- /dev/null
+++ b/src/analytics/frontend/requirements.in
@@ -0,0 +1,20 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apscheduler==3.10.4
+confluent-kafka==2.3.*
+psycopg2-binary==2.9.*
+SQLAlchemy==1.4.*
+sqlalchemy-cockroachdb==1.4.*
+SQLAlchemy-Utils==0.38.*
diff --git a/src/analytics/frontend/service/AnalyticsFrontendService.py b/src/analytics/frontend/service/AnalyticsFrontendService.py
new file mode 100644
index 0000000000000000000000000000000000000000..42a7fc9b60418c1c0fc5af6f320ae5c330ce8871
--- /dev/null
+++ b/src/analytics/frontend/service/AnalyticsFrontendService.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.proto.analytics_frontend_pb2_grpc import add_AnalyticsFrontendServiceServicer_to_server
+from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl
+
+class AnalyticsFrontendService(GenericGrpcService):
+    def __init__(self, cls_name: str = __name__):
+        port = get_service_port_grpc(ServiceNameEnum.ANALYTICSFRONTEND)
+        super().__init__(port, cls_name=cls_name)
+        self.analytics_frontend_servicer = AnalyticsFrontendServiceServicerImpl()
+    
+    def install_servicers(self):
+        add_AnalyticsFrontendServiceServicer_to_server(self.analytics_frontend_servicer, self.server)
diff --git a/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py b/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bb6a17afb5b911e3652fdb8d1853b5b7bc6faf3
--- /dev/null
+++ b/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py
@@ -0,0 +1,214 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging, grpc, json, queue
+
+from typing          import Dict
+from confluent_kafka import Consumer as KafkaConsumer
+from confluent_kafka import Producer as KafkaProducer
+from confluent_kafka import KafkaError
+
+from common.tools.kafka.Variables             import KafkaConfig, KafkaTopic
+from common.proto.context_pb2                 import Empty
+from common.method_wrappers.Decorator         import MetricsPool, safe_and_metered_rpc_method
+from common.proto.analytics_frontend_pb2      import Analyzer, AnalyzerId, AnalyzerFilter, AnalyzerList
+from common.proto.analytics_frontend_pb2_grpc import AnalyticsFrontendServiceServicer
+from analytics.database.Analyzer_DB           import AnalyzerDB
+from analytics.database.AnalyzerModel         import Analyzer as AnalyzerModel
+from apscheduler.schedulers.background        import BackgroundScheduler
+from apscheduler.triggers.interval            import IntervalTrigger
+
+LOGGER           = logging.getLogger(__name__)
+METRICS_POOL     = MetricsPool('AnalyticsFrontend', 'NBIgRPC')
+
+class AnalyticsFrontendServiceServicerImpl(AnalyticsFrontendServiceServicer):
+    def __init__(self):
+        LOGGER.info('Init AnalyticsFrontendService')
+        self.listener_topic = KafkaTopic.ANALYTICS_RESPONSE.value
+        self.db_obj         = AnalyzerDB()
+        self.result_queue   = queue.Queue()
+        self.scheduler      = BackgroundScheduler()
+        self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()})
+        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(),
+                                            'group.id'           : 'analytics-frontend',
+                                            'auto.offset.reset'  : 'latest'})
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StartAnalyzer(self, 
+                       request : Analyzer, grpc_context: grpc.ServicerContext # type: ignore
+                      ) -> AnalyzerId: # type: ignore
+        LOGGER.info ("At Service gRPC message: {:}".format(request))
+        response = AnalyzerId()
+
+        self.db_obj.add_row_to_db(
+            AnalyzerModel.ConvertAnalyzerToRow(request)
+        )
+        self.PublishStartRequestOnKafka(request)
+        
+        response.analyzer_id.uuid = request.analyzer_id.analyzer_id.uuid
+        return response
+
+    def PublishStartRequestOnKafka(self, analyzer_obj):
+        """
+        Publish an analyzer start request on Kafka.
+        """
+        analyzer_uuid = analyzer_obj.analyzer_id.analyzer_id.uuid
+        analyzer_to_generate : Dict = {
+            "algo_name"       : analyzer_obj.algorithm_name,
+            "input_kpis"      : [k.kpi_id.uuid for k in analyzer_obj.input_kpi_ids],
+            "output_kpis"     : [k.kpi_id.uuid for k in analyzer_obj.output_kpi_ids],
+            "oper_mode"       : analyzer_obj.operation_mode,
+            "thresholds"      : json.loads(analyzer_obj.parameters["thresholds"]),
+            "window_size"     : analyzer_obj.parameters["window_size"],
+            "window_slider"   : analyzer_obj.parameters["window_slider"],
+            # "store_aggregate" : analyzer_obj.parameters["store_aggregate"] 
+        }
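+        # Illustrative content of the message published on ANALYTICS_REQUEST (values assumed, for reference only):
+        #   key   = "efef4d95-1cf1-43c4-9742-95c283ddd7a6"
+        #   value = {"algo_name": "Test_Aggregate_and_Threshold",
+        #            "input_kpis": ["6e22f180-ba28-4641-b190-2287bf448888"], "output_kpis": [...],
+        #            "oper_mode": <AnalyzerOperationMode value>, "thresholds": {"avg_value": [20, 30]},
+        #            "window_size": "60 seconds", "window_slider": "30 seconds"}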
+        self.kafka_producer.produce(
+            KafkaTopic.ANALYTICS_REQUEST.value,
+            key      = analyzer_uuid,
+            value    = json.dumps(analyzer_to_generate),
+            callback = self.delivery_callback
+        )
+        LOGGER.info("Analyzer Start Request Generated: Analyzer Id: {:}, Value: {:}".format(analyzer_uuid, analyzer_to_generate))
+        self.kafka_producer.flush()
+        
+        # self.StartResponseListener(analyzer_uuid)
+
+    def StartResponseListener(self, filter_key=None):
+        """
+        Start the Kafka response listener with APScheduler and return key-value pairs periodically.
+        """
+        LOGGER.info("Starting StartResponseListener")
+        # Schedule the ResponseListener at fixed intervals
+        self.scheduler.add_job(
+            self.response_listener,
+            trigger=IntervalTrigger(seconds=5),
+            args=[filter_key], 
+            id=f"response_listener_{self.listener_topic}",
+            replace_existing=True
+        )
+        self.scheduler.start()
+        LOGGER.info(f"Started Kafka listener for topic {self.listener_topic}...")
+        try:
+            while True:
+                LOGGER.info("entering while...")
+                key, value = self.result_queue.get()  # Wait until a result is available
+                LOGGER.info("In while true ...")
+                yield key, value  # Yield the result to the calling function
+        except KeyboardInterrupt:
+            LOGGER.warning("Listener stopped manually.")
+        finally:
+            self.StopListener()
+
+    def response_listener(self, filter_key=None):
+        """
+        Poll Kafka messages and put key-value pairs into the queue.
+        """
+        LOGGER.info(f"Polling Kafka topic {self.listener_topic}...")
+
+        consumer = self.kafka_consumer
+        consumer.subscribe([self.listener_topic])
+        msg = consumer.poll(2.0)
+        if msg is None:
+            return
+        elif msg.error():
+            if msg.error().code() != KafkaError._PARTITION_EOF:
+                LOGGER.error(f"Kafka error: {msg.error()}")
+            return
+
+        try:
+            key = msg.key().decode('utf-8') if msg.key() else None
+            if filter_key is not None and key == filter_key:
+                value = json.loads(msg.value().decode('utf-8'))
+                LOGGER.info(f"Received key: {key}, value: {value}")
+                self.result_queue.put((key, value))
+            else:
+                LOGGER.info(f"Skipping message with unmatched key: {key}")
+                # value = json.loads(msg.value().decode('utf-8')) # Added for debugging
+                # self.result_queue.put((filter_key, value))             # Added for debugging
+        except Exception as e:
+            LOGGER.error(f"Error processing Kafka message: {e}")
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StopAnalyzer(self, 
+                      request : AnalyzerId, grpc_context: grpc.ServicerContext # type: ignore
+                     ) -> Empty:  # type: ignore
+        LOGGER.info ("At Service gRPC message: {:}".format(request))
+        try:
+            analyzer_id_to_delete = request.analyzer_id.uuid
+            self.db_obj.delete_db_row_by_id(
+                AnalyzerModel, "analyzer_id", analyzer_id_to_delete
+            )
+            self.PublishStopRequestOnKafka(analyzer_id_to_delete)
+        except Exception as e:
+            LOGGER.error('Unable to delete analyzer. Error: {:}'.format(e))
+        return Empty()
+
+    def PublishStopRequestOnKafka(self, analyzer_uuid):
+        """
+        Publish an analyzer stop request on Kafka.
+        """
+        # analyzer_uuid = analyzer_id.analyzer_id.uuid
+        analyzer_to_stop :  Dict = {
+            "algo_name"   : None,
+            "input_kpis"  : [],
+            "output_kpis" : [],
+            "oper_mode"   : None
+        }
+        self.kafka_producer.produce(
+            KafkaTopic.ANALYTICS_REQUEST.value,
+            key      = analyzer_uuid,
+            value    = json.dumps(analyzer_to_stop),
+            callback = self.delivery_callback
+        )
+        LOGGER.info("Analyzer Stop Request Generated: Analyzer Id: {:}".format(analyzer_uuid))
+        self.kafka_producer.flush()
+        self.StopListener()
+
+    def StopListener(self):
+        """
+        Gracefully stop the Kafka listener and the scheduler.
+        """
+        LOGGER.info("Stopping Kafka listener...")
+        self.scheduler.shutdown()
+        LOGGER.info("Kafka listener stopped.")
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectAnalyzers(self, 
+                         filter : AnalyzerFilter, grpc_context: grpc.ServicerContext # type: ignore
+                        ) -> AnalyzerList:  # type: ignore
+        LOGGER.info("At Service gRPC message: {:}".format(filter))
+        response = AnalyzerList()
+        try:
+            rows = self.db_obj.select_with_filter(AnalyzerModel, filter)
+            try:
+                for row in rows:
+                    response.analyzer_list.append(
+                        AnalyzerModel.ConvertRowToAnalyzer(row)
+                    )
+                return response
+            except Exception as e:
+                LOGGER.info('Unable to process filter response {:}'.format(e))
+        except Exception as e:
+            LOGGER.error('Unable to apply filter on table {:}. ERROR: {:}'.format(AnalyzerModel.__name__, e))
+        return response
+
+    def delivery_callback(self, err, msg):
+        if err:
+            LOGGER.debug('Message delivery failed: {:}'.format(err))
+            print       ('Message delivery failed: {:}'.format(err))
+        # else:
+        #     LOGGER.debug('Message delivered to topic {:}'.format(msg.topic()))
+        #     print('Message delivered to topic {:}'.format(msg.topic()))
diff --git a/src/analytics/frontend/service/__init__.py b/src/analytics/frontend/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/analytics/frontend/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/analytics/frontend/service/__main__.py b/src/analytics/frontend/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c331844f45d98095ef98951f3db43a0e2f0c69c
--- /dev/null
+++ b/src/analytics/frontend/service/__main__.py
@@ -0,0 +1,56 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from prometheus_client import start_http_server
+from common.Settings import get_log_level, get_metrics_port
+from .AnalyticsFrontendService import AnalyticsFrontendService
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
+
+    grpc_service = AnalyticsFrontendService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.info('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/analytics/frontend/tests/__init__.py b/src/analytics/frontend/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/analytics/frontend/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/analytics/frontend/tests/messages.py b/src/analytics/frontend/tests/messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..646de962e8a213582fdb7cd1446ab57bda561a96
--- /dev/null
+++ b/src/analytics/frontend/tests/messages.py
@@ -0,0 +1,84 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+import json
+from common.proto.kpi_manager_pb2        import KpiId
+from common.proto.analytics_frontend_pb2 import ( AnalyzerOperationMode, AnalyzerId,
+                                                Analyzer, AnalyzerFilter )
+
+def create_analyzer_id():
+    _create_analyzer_id                  = AnalyzerId()
+    # _create_analyzer_id.analyzer_id.uuid = str(uuid.uuid4())
+    _create_analyzer_id.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6"
+    return _create_analyzer_id
+
+def create_analyzer():
+    _create_analyzer                              = Analyzer()
+    # _create_analyzer.analyzer_id.analyzer_id.uuid = str(uuid.uuid4())
+    _create_analyzer.analyzer_id.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6"
+    _create_analyzer.algorithm_name               = "Test_Aggregate_and_Threshold"
+    _create_analyzer.operation_mode               = AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING
+    
+    _kpi_id = KpiId()
+    # input IDs to analyze
+    _kpi_id.kpi_id.uuid              = str(uuid.uuid4())
+    _kpi_id.kpi_id.uuid              = "6e22f180-ba28-4641-b190-2287bf448888"
+    _create_analyzer.input_kpi_ids.append(_kpi_id)
+    _kpi_id.kpi_id.uuid              = str(uuid.uuid4())
+    _kpi_id.kpi_id.uuid              = "1e22f180-ba28-4641-b190-2287bf446666"
+    _create_analyzer.input_kpi_ids.append(_kpi_id)
+    _kpi_id.kpi_id.uuid              = str(uuid.uuid4())
+    _create_analyzer.input_kpi_ids.append(_kpi_id)
+    # output IDs after analysis
+    _kpi_id.kpi_id.uuid              = str(uuid.uuid4())
+    _create_analyzer.output_kpi_ids.append(_kpi_id)
+    _kpi_id.kpi_id.uuid              = str(uuid.uuid4())
+    _create_analyzer.output_kpi_ids.append(_kpi_id)
+    # parameter
+    _threshold_dict = {
+        # 'avg_value'   :(20, 30), 'min_value'   :(00, 10), 'max_value'   :(45, 50),
+        'first_value' :(00, 10), 'last_value'  :(40, 50), 'stdev_value':(00, 10)}
+    _create_analyzer.parameters['thresholds']      = json.dumps(_threshold_dict)
+    _create_analyzer.parameters['window_size']     = "60 seconds"     # Such as "10 seconds", "2 minutes", "3 hours", "4 days" or "5 weeks" 
+    _create_analyzer.parameters['window_slider']   = "30 seconds"     # should be less than window size
+    _create_analyzer.parameters['store_aggregate'] = str(False)       # True to store aggregates; not implemented yet
+
+    return _create_analyzer
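+
+# Usage sketch (illustrative): these helpers feed the frontend client tests, e.g.
+#   analyzer_id   = analyticsFrontend_client.StartAnalyzer(create_analyzer())
+#   analyzer_list = analyticsFrontend_client.SelectAnalyzers(create_analyzer_filter())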
+
+def create_analyzer_filter():
+    _create_analyzer_filter           = AnalyzerFilter()
+
+    _analyzer_id_obj                  = AnalyzerId()
+    # _analyzer_id_obj.analyzer_id.uuid = str(uuid.uuid4())
+    _analyzer_id_obj.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6"
+    _create_analyzer_filter.analyzer_id.append(_analyzer_id_obj)
+
+    _create_analyzer_filter.algorithm_names.append('Test_Aggregate_and_Threshold')
+
+    # _input_kpi_id_obj                 = KpiId()
+    # _input_kpi_id_obj.kpi_id.uuid     = str(uuid.uuid4())
+    # _create_analyzer_filter.input_kpi_ids.append(_input_kpi_id_obj)
+    # another input kpi Id
+    # _input_kpi_id_obj.kpi_id.uuid     = str(uuid.uuid4())
+    # _create_analyzer_filter.input_kpi_ids.append(_input_kpi_id_obj)
+
+    # _output_kpi_id_obj                = KpiId()
+    # _output_kpi_id_obj.kpi_id.uuid    = str(uuid.uuid4())
+    # _create_analyzer_filter.output_kpi_ids.append(_output_kpi_id_obj)
+    # # another output kpi Id
+    # _output_kpi_id_obj.kpi_id.uuid     = str(uuid.uuid4())
+    # _create_analyzer_filter.input_kpi_ids.append(_output_kpi_id_obj)
+
+    return _create_analyzer_filter
diff --git a/src/analytics/frontend/tests/test_frontend.py b/src/analytics/frontend/tests/test_frontend.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2428c01fb021f71a884d9a99c446bfef6e66559
--- /dev/null
+++ b/src/analytics/frontend/tests/test_frontend.py
@@ -0,0 +1,134 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+import json
+import pytest
+import logging
+import threading
+
+from common.Constants         import ServiceNameEnum
+from common.proto.context_pb2 import Empty
+from common.Settings          import ( get_service_port_grpc, get_env_var_name, 
+                                      ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC )
+
+from common.tools.kafka.Variables                        import KafkaTopic
+from common.proto.analytics_frontend_pb2                 import AnalyzerId, AnalyzerList
+from analytics.frontend.client.AnalyticsFrontendClient   import AnalyticsFrontendClient
+from analytics.frontend.service.AnalyticsFrontendService import AnalyticsFrontendService
+from analytics.frontend.tests.messages                   import ( create_analyzer_id, create_analyzer,
+                                                                 create_analyzer_filter )
+from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl
+from apscheduler.schedulers.background                   import BackgroundScheduler
+from apscheduler.triggers.interval                       import IntervalTrigger
+
+
+###########################
+# Tests Setup
+###########################
+
+LOCAL_HOST = '127.0.0.1'
+
+ANALYTICS_FRONTEND_PORT = str(get_service_port_grpc(ServiceNameEnum.ANALYTICSFRONTEND))
+os.environ[get_env_var_name(ServiceNameEnum.ANALYTICSFRONTEND, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.ANALYTICSFRONTEND, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(ANALYTICS_FRONTEND_PORT)
+
+LOGGER = logging.getLogger(__name__)
+
+@pytest.fixture(scope='session')
+def analyticsFrontend_service():
+    LOGGER.info('Initializing AnalyticsFrontendService...')
+
+    _service = AnalyticsFrontendService()
+    _service.start()
+
+    # yield the server, when test finishes, execution will resume to stop it
+    LOGGER.info('Yielding AnalyticsFrontendService...')
+    yield _service
+
+    LOGGER.info('Terminating AnalyticsFrontendService...')
+    _service.stop()
+
+    LOGGER.info('Terminated AnalyticsFrontendService...')
+
+@pytest.fixture(scope='session')
+def analyticsFrontend_client(analyticsFrontend_service : AnalyticsFrontendService):
+    LOGGER.info('Initializing AnalyticsFrontendClient...')
+
+    _client = AnalyticsFrontendClient()
+
+    # yield the client; when the test finishes, execution will resume to close it
+    LOGGER.info('Yielding AnalyticsFrontendClient...')
+    yield _client
+
+    LOGGER.info('Closing AnalyticsFrontendClient...')
+    _client.close()
+
+    LOGGER.info('Closed AnalyticsFrontendClient...')
+
+
+###########################
+# Tests Implementation of Analytics Frontend
+###########################
+
+# --- "test_validate_kafka_topics" should be executed before the functionality tests ---
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+# ----- core functionality test -----
+# def test_StartAnalytics(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_StartAnalytic START: <<< ')
+#     response = analyticsFrontend_client.StartAnalyzer(create_analyzer())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, AnalyzerId)
+
+# To test start and stop listener together
+def test_StartStopAnalyzers(analyticsFrontend_client):
+    LOGGER.info(' >>> test_StartStopAnalyzers START: <<< ')
+    LOGGER.info('--> StartAnalyzer')
+    added_analyzer_id = analyticsFrontend_client.StartAnalyzer(create_analyzer())
+    LOGGER.debug(str(added_analyzer_id))
+    LOGGER.info(' --> Calling StartResponseListener... ')
+    class_obj = AnalyticsFrontendServiceServicerImpl()
+    response = class_obj.StartResponseListener(added_analyzer_id.analyzer_id.uuid)
+    LOGGER.debug(response)
+    LOGGER.info("waiting for the timer to complete ...")
+    time.sleep(3)
+    LOGGER.info('--> StopAnalyzer')
+    response = analyticsFrontend_client.StopAnalyzer(added_analyzer_id)
+    LOGGER.debug(str(response))
+
+# def test_SelectAnalytics(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_SelectAnalytics START: <<< ')
+#     response = analyticsFrontend_client.SelectAnalyzers(create_analyzer_filter())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, AnalyzerList)
+
+# def test_StopAnalytic(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_StopAnalytic START: <<< ')
+#     response = analyticsFrontend_client.StopAnalyzer(create_analyzer_id())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, Empty)
+
+# def test_ResponseListener():
+#         LOGGER.info(' >>> test_ResponseListener START <<< ')
+#         analyzer_id = create_analyzer_id()
+#         LOGGER.debug("Starting Response Listener for Analyzer ID: {:}".format(analyzer_id.analyzer_id.uuid))
+#         class_obj = AnalyticsFrontendServiceServicerImpl()
+#         for response in class_obj.StartResponseListener(analyzer_id.analyzer_id.uuid):
+#             LOGGER.debug(response)
+#             assert isinstance(response, tuple)
\ No newline at end of file
diff --git a/src/analytics/requirements.in b/src/analytics/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..8ff30ddaad25c39713f2e6f68c8d9aebed74dad0
--- /dev/null
+++ b/src/analytics/requirements.in
@@ -0,0 +1,21 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+java==11.0.*
+pyspark==3.5.2
+confluent-kafka==2.3.*
+psycopg2-binary==2.9.*
+SQLAlchemy==1.4.*
+sqlalchemy-cockroachdb==1.4.*
+SQLAlchemy-Utils==0.38.*
diff --git a/src/analytics/tests/__init__.py b/src/analytics/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/analytics/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/analytics/tests/test_analytics_db.py b/src/analytics/tests/test_analytics_db.py
new file mode 100644
index 0000000000000000000000000000000000000000..58e7d0167044bb461e66b053dcb3999641ea8419
--- /dev/null
+++ b/src/analytics/tests/test_analytics_db.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging
+from analytics.database.Analyzer_DB import AnalyzerDB
+
+LOGGER = logging.getLogger(__name__)
+
+def test_verify_databases_and_tables():
+    LOGGER.info('>>> test_verify_databases_and_tables : START <<< ')
+    AnalyzerDBobj = AnalyzerDB()
+    # AnalyzerDBobj.drop_database()
+    # AnalyzerDBobj.verify_tables()
+    AnalyzerDBobj.create_database()
+    AnalyzerDBobj.create_tables()
+    AnalyzerDBobj.verify_tables()
diff --git a/src/common/Constants.py b/src/common/Constants.py
index a28d35d0e28d4d59d2be2cde59876f689f86d496..2f3cef0a7e8351ffa7799bf405e90d816163f1ee 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -66,6 +66,9 @@ class ServiceNameEnum(Enum):
     KPIVALUEAPI            = 'kpi-value-api'
     KPIVALUEWRITER         = 'kpi-value-writer'
     TELEMETRYFRONTEND      = 'telemetry-frontend'
+    TELEMETRYBACKEND       = 'telemetry-backend'
+    ANALYTICSFRONTEND      = 'analytics-frontend'
+    ANALYTICSBACKEND       = 'analytics-backend'
 
     # Used for test and debugging only
     DLT_GATEWAY    = 'dltgateway'
@@ -100,6 +103,9 @@ DEFAULT_SERVICE_GRPC_PORTS = {
     ServiceNameEnum.KPIVALUEAPI            .value : 30020,
     ServiceNameEnum.KPIVALUEWRITER         .value : 30030,
     ServiceNameEnum.TELEMETRYFRONTEND      .value : 30050,
+    ServiceNameEnum.TELEMETRYBACKEND       .value : 30060,
+    ServiceNameEnum.ANALYTICSFRONTEND      .value : 30080,
+    ServiceNameEnum.ANALYTICSBACKEND       .value : 30090,
 
     # Used for test and debugging only
     ServiceNameEnum.DLT_GATEWAY   .value : 50051,
diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py
index 24ae2cff7b5e710e18999eb09029216a4a5d6c8a..fc43c315114e7b51c4e2604afbb14e165796e7c5 100644
--- a/src/common/tools/kafka/Variables.py
+++ b/src/common/tools/kafka/Variables.py
@@ -14,23 +14,40 @@
 
 import logging
 from enum import Enum
-from confluent_kafka import KafkaException
 from confluent_kafka.admin import AdminClient, NewTopic
+from common.Settings import get_setting
 
 
 LOGGER = logging.getLogger(__name__)
+KFK_SERVER_ADDRESS_TEMPLATE = 'kafka-service.{:s}.svc.cluster.local:{:s}'
 
 class KafkaConfig(Enum):
-    # SERVER_IP    = "127.0.0.1:9092"
-    SERVER_IP    = "kafka-service.kafka.svc.cluster.local:9092"
-    ADMIN_CLIENT =  AdminClient({'bootstrap.servers': SERVER_IP})
+
+    @staticmethod
+    def get_kafka_address() -> str:
+        # kafka_server_address = get_setting('KFK_SERVER_ADDRESS', default=None)
+        # if kafka_server_address is None:
+        KFK_NAMESPACE        = get_setting('KFK_NAMESPACE')
+        KFK_PORT             = get_setting('KFK_SERVER_PORT')
+        kafka_server_address = KFK_SERVER_ADDRESS_TEMPLATE.format(KFK_NAMESPACE, KFK_PORT)
+        return kafka_server_address
+        
+    @staticmethod
+    def get_admin_client():
+        SERVER_ADDRESS = KafkaConfig.get_kafka_address()
+        ADMIN_CLIENT   = AdminClient({'bootstrap.servers': SERVER_ADDRESS })
+        return ADMIN_CLIENT
+
 
 class KafkaTopic(Enum):
-    REQUEST  = 'topic_request' 
-    RESPONSE = 'topic_response'
-    RAW      = 'topic_raw' 
-    LABELED  = 'topic_labeled'
-    VALUE    = 'topic_value'
+    # TODO: Later to be populated from ENV variable.
+    REQUEST            = 'topic_request' 
+    RESPONSE           = 'topic_response'
+    RAW                = 'topic_raw' 
+    LABELED            = 'topic_labeled'
+    VALUE              = 'topic_value'
+    ANALYTICS_REQUEST  = 'topic_request_analytics'
+    ANALYTICS_RESPONSE = 'topic_response_analytics'
 
     @staticmethod
     def create_all_topics() -> bool:
@@ -38,8 +55,9 @@ class KafkaTopic(Enum):
             Method to create Kafka topics defined as class members
         """
         all_topics = [member.value for member in KafkaTopic]
+        LOGGER.debug("Kafka server address is: {:} ".format(KafkaConfig.get_kafka_address()))
         if( KafkaTopic.create_new_topic_if_not_exists( all_topics )):
-            LOGGER.debug("All topics are created sucsessfully")
+            LOGGER.debug("All topics are created sucsessfully or Already Exists")
             return True
         else:
             LOGGER.debug("Error creating all topics")
@@ -55,14 +73,14 @@ class KafkaTopic(Enum):
         LOGGER.debug("Topics names to be verified and created: {:}".format(new_topics))
         for topic in new_topics:
             try:
-                topic_metadata = KafkaConfig.ADMIN_CLIENT.value.list_topics(timeout=5)
+                topic_metadata = KafkaConfig.get_admin_client().list_topics(timeout=5)
                 # LOGGER.debug("Existing topic list: {:}".format(topic_metadata.topics))
                 if topic not in topic_metadata.topics:
                     # If the topic does not exist, create a new topic
                     print("Topic {:} does not exist. Creating...".format(topic))
                     LOGGER.debug("Topic {:} does not exist. Creating...".format(topic))
                     new_topic = NewTopic(topic, num_partitions=1, replication_factor=1)
-                    KafkaConfig.ADMIN_CLIENT.value.create_topics([new_topic])
+                    KafkaConfig.get_admin_client().create_topics([new_topic])
                 else:
                     print("Topic name already exists: {:}".format(topic))
                     LOGGER.debug("Topic name already exists: {:}".format(topic))
diff --git a/src/kpi_manager/README.md b/src/kpi_manager/README.md
index c1feadcc4843db26a219d1e3b37833ddd80b18dc..6e9b56d9349aa6acd5c41004e32c933619a37f65 100644
--- a/src/kpi_manager/README.md
+++ b/src/kpi_manager/README.md
@@ -1,29 +1,24 @@
 # How to locally run and test KPI manager micro-service
 
-## --- File links need to be updated. ---
 ### Pre-requisets 
-The following requirements should be fulfilled before the execuation of KPI management service.
+Ensure the following requirements are met before executing the KPI management service:
 
-1. Verify that [kpi_management.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/kpi_management.proto) file exists and grpcs file are generated sucessfully. 
-2. Virtual enviornment exist with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/requirements.in) are installed sucessfully.
-3. Verify the creation of required database and table.
-[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/database/tests/KpiDBtests.py) python file enlist the functions to create tables and database and
-[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/service/database/KpiEngine.py) contains the DB string, update the string as per your deployment.
+1. A virtual environment exists with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/requirements.in) successfully installed.
+2. Verify the creation of the required database and tables. The
+[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_kpi_db.py) python file lists the functions that create the database and tables, and the
+[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/database/KpiEngine.py) file contains the DB connection string.
 
 ### Messages format templates
-["Messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_messages.py) python file enlist the basic gRPC messages format used during the testing.
+The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_messages.py) python file contains templates for creating gRPC messages.
 
-### Test file
-["KPI management test"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_kpi_manager.py) python file enlist different tests conducted during the experiment.
+### Unit test file
+The ["KPI manager test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_manager/tests/test_kpi_manager.py) python file lists various tests conducted to validate functionality.
 
 ### Flow of execution (Kpi Maanager Service functions)
 1. Call the `create_database()` and `create_tables()` functions from `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables` to verify the existence of KPI table.
 
-2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor in `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
+2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor to the `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
 
-3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from DB.
+3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from the DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from the DB.
 
-4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that matches the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types.
-
-## For KPI composer and KPI writer
-The functionalities of KPI composer and writer is heavily dependent upon Telemetery service. Therfore, these services has other pre-requsites that are mention [here](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/requirements.in).
+4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that match the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types.
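+
+The snippet below is a minimal sketch of this flow. It reuses the client and the message
+templates referenced above and assumes the service is reachable with the host/port environment
+variables set as in the unit tests; adapt it to your deployment.
+
+```python
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+from kpi_manager.tests.test_messages import create_kpi_descriptor_request, create_kpi_filter_request
+
+client = KpiManagerClient()
+
+kpi_id     = client.SetKpiDescriptor(create_kpi_descriptor_request())   # returns a KpiId
+descriptor = client.GetKpiDescriptor(kpi_id)                            # read the descriptor back
+matches    = client.SelectKpiDescriptor(create_kpi_filter_request())    # returns a KpiDescriptorList
+client.DeleteKpiDescriptor(kpi_id)                                      # clean up
+```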
diff --git a/src/kpi_manager/database/KpiEngine.py b/src/kpi_manager/database/KpiEngine.py
index dff406de666b5f68539b8897fa26e0b3ad51286b..0fce7e3d36cf2f03a18f311c815719a4f17b2869 100644
--- a/src/kpi_manager/database/KpiEngine.py
+++ b/src/kpi_manager/database/KpiEngine.py
@@ -16,8 +16,6 @@ import logging, sqlalchemy
 from common.Settings import get_setting
 
 LOGGER = logging.getLogger(__name__)
-
-# CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}'
 CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
 
 class KpiEngine:
@@ -33,12 +31,10 @@ class KpiEngine:
             CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
             crdb_uri = CRDB_URI_TEMPLATE.format(
                 CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
-        # crdb_uri = CRDB_URI_TEMPLATE.format(
-        #         CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
         try:
             engine = sqlalchemy.create_engine(crdb_uri, echo=False)
             LOGGER.info(' KpiDBmanager initalized with DB URL: {:}'.format(crdb_uri))
         except: # pylint: disable=bare-except # pragma: no cover
             LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
             return None # type: ignore
-        return engine 
+        return engine
diff --git a/src/kpi_manager/database/Kpi_DB.py b/src/kpi_manager/database/Kpi_DB.py
index 4b60640707c8d0c2ce90e5ab135ddf6fd4c91f63..49ad9c9b579daa918818366a1d9505089968edc2 100644
--- a/src/kpi_manager/database/Kpi_DB.py
+++ b/src/kpi_manager/database/Kpi_DB.py
@@ -34,14 +34,15 @@ class KpiDB:
 
     def create_database(self) -> None:
         if not sqlalchemy_utils.database_exists(self.db_engine.url):
-            LOGGER.debug("Database created. {:}".format(self.db_engine.url))
             sqlalchemy_utils.create_database(self.db_engine.url)
+            LOGGER.debug("Database created. {:}".format(self.db_engine.url))
 
     def drop_database(self) -> None:
         if sqlalchemy_utils.database_exists(self.db_engine.url):
             sqlalchemy_utils.drop_database(self.db_engine.url)
 
     def create_tables(self):
+        # TODO: use "get_tables(declatrative class obj)" method of "sqlalchemy_utils" to verify tables.
         try:
             KpiModel.metadata.create_all(self.db_engine)     # type: ignore
             LOGGER.debug("Tables created in the DB Name: {:}".format(self.db_name))
@@ -69,8 +70,7 @@ class KpiDB:
             session.rollback()
             if "psycopg2.errors.UniqueViolation" in str(e):
                 LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}")
-                raise AlreadyExistsException(row.__class__.__name__, row,
-                                             extra_details=["Unique key voilation: {:}".format(e)] )
+                raise AlreadyExistsException(row.__class__.__name__, row, extra_details=["Unique key voilation: {:}".format(e)] )
             else:
                 LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
                 raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
@@ -89,7 +89,6 @@ class KpiDB:
                 print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search))
                 return None
         except Exception as e:
-            session.rollback()
             LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}")
             raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)])
         finally:
diff --git a/src/kpi_manager/tests/test_kpi_db.py b/src/kpi_manager/tests/test_kpi_db.py
index e961c12bacdbac07f111b229435ed3d89d62581f..d4a57f83664f851504389b3bbe99d5c2a92542d9 100644
--- a/src/kpi_manager/tests/test_kpi_db.py
+++ b/src/kpi_manager/tests/test_kpi_db.py
@@ -21,8 +21,8 @@ LOGGER = logging.getLogger(__name__)
 def test_verify_databases_and_Tables():
     LOGGER.info('>>> test_verify_Tables : START <<< ')
     kpiDBobj = KpiDB()
-    kpiDBobj.drop_database()
-    kpiDBobj.verify_tables()
+    # kpiDBobj.drop_database()
+    # kpiDBobj.verify_tables()
     kpiDBobj.create_database()
     kpiDBobj.create_tables()
     kpiDBobj.verify_tables()
diff --git a/src/kpi_manager/tests/test_kpi_manager.py b/src/kpi_manager/tests/test_kpi_manager.py
index f0d9526d33694a683b70180eb3bc6de833bf1cfa..219fdadee9e2f4ca9ea9ac0be040043d4edfbdbe 100755
--- a/src/kpi_manager/tests/test_kpi_manager.py
+++ b/src/kpi_manager/tests/test_kpi_manager.py
@@ -17,7 +17,7 @@ import os, pytest
 import logging
 from typing import Union
 
-#from common.proto.context_pb2 import  Empty
+from common.proto.context_pb2 import  Empty
 from common.Constants import ServiceNameEnum
 from common.Settings import ( 
     ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
@@ -26,12 +26,6 @@ from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
 
 from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList
 from common.tools.service.GenericGrpcService import GenericGrpcService
-#from context.client.ContextClient import ContextClient
-
-# from device.service.driver_api.DriverFactory import DriverFactory
-# from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
-# from device.service.DeviceService import DeviceService
-# from device.client.DeviceClient import DeviceClient
 
 from kpi_manager.tests.test_messages import create_kpi_descriptor_request, create_kpi_filter_request, create_kpi_descriptor_request_a
 from kpi_manager.service.KpiManagerService import KpiManagerService
@@ -39,12 +33,6 @@ from kpi_manager.client.KpiManagerClient import KpiManagerClient
 from kpi_manager.tests.test_messages import create_kpi_descriptor_request
 from kpi_manager.tests.test_messages import create_kpi_id_request
 
-
-#from monitoring.service.NameMapping import NameMapping
-
-#os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
-#from device.service.drivers import DRIVERS
-
 ###########################
 # Tests Setup
 ###########################
@@ -55,8 +43,6 @@ KPIMANAGER_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIMANAGER)  # t
 os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
 os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIMANAGER_SERVICE_PORT)
 
-# METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME'){}
-
 LOGGER = logging.getLogger(__name__)
 
 class MockContextService(GenericGrpcService):
@@ -70,84 +56,10 @@ class MockContextService(GenericGrpcService):
         self.context_servicer = MockServicerImpl_Context()
         add_ContextServiceServicer_to_server(self.context_servicer, self.server)
 
-# @pytest.fixture(scope='session')
-# def context_service():
-#     LOGGER.info('Initializing MockContextService...')
-#     _service = MockContextService(MOCKSERVICE_PORT)
-#     _service.start()
-    
-#     LOGGER.info('Yielding MockContextService...')
-#     yield _service
-
-#     LOGGER.info('Terminating MockContextService...')
-#     _service.context_servicer.msg_broker.terminate()
-#     _service.stop()
-
-#     LOGGER.info('Terminated MockContextService...')
-
-# @pytest.fixture(scope='session')
-# def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
-#     LOGGER.info('Initializing ContextClient...')
-#     _client = ContextClient()
-    
-#     LOGGER.info('Yielding ContextClient...')
-#     yield _client
-
-#     LOGGER.info('Closing ContextClient...')
-#     _client.close()
-
-#     LOGGER.info('Closed ContextClient...')
-
-# @pytest.fixture(scope='session')
-# def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
-#     LOGGER.info('Initializing DeviceService...')
-#     driver_factory = DriverFactory(DRIVERS)
-#     driver_instance_cache = DriverInstanceCache(driver_factory)
-#     _service = DeviceService(driver_instance_cache)
-#     _service.start()
-
-#     # yield the server, when test finishes, execution will resume to stop it
-#     LOGGER.info('Yielding DeviceService...')
-#     yield _service
-
-#     LOGGER.info('Terminating DeviceService...')
-#     _service.stop()
-
-#     LOGGER.info('Terminated DeviceService...')
-
-# @pytest.fixture(scope='session')
-# def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument
-#     LOGGER.info('Initializing DeviceClient...')
-#     _client = DeviceClient()
-
-#     LOGGER.info('Yielding DeviceClient...')
-#     yield _client
-
-#     LOGGER.info('Closing DeviceClient...')
-#     _client.close()
-
-#     LOGGER.info('Closed DeviceClient...')
-
-# @pytest.fixture(scope='session')
-# def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument
-#     LOGGER.info('Initializing DeviceClient...')
-#     _client = DeviceClient()
-
-#     LOGGER.info('Yielding DeviceClient...')
-#     yield _client
-
-#     LOGGER.info('Closing DeviceClient...')
-#     _client.close()
-
-#     LOGGER.info('Closed DeviceClient...')
-
 # This fixture will be requested by test cases and last during testing session
 @pytest.fixture(scope='session')
 def kpi_manager_service():
     LOGGER.info('Initializing KpiManagerService...')
-    #name_mapping = NameMapping()
-    # _service = MonitoringService(name_mapping)
-    # _service = KpiManagerService(name_mapping)
     _service = KpiManagerService()
     _service.start()
 
@@ -181,35 +93,28 @@ def kpi_manager_client(kpi_manager_service : KpiManagerService): # pylint: disab
 # Prepare Environment, should be the first test
 ##################################################
 
-# # ERROR on this test --- 
-# def test_prepare_environment(
-#     context_client : ContextClient,                 # pylint: disable=redefined-outer-name,unused-argument
-# ):
-#     context_id = json_context_id(DEFAULT_CONTEXT_NAME)
-#     context_client.SetContext(Context(**json_context(DEFAULT_CONTEXT_NAME)))
-#     context_client.SetTopology(Topology(**json_topology(DEFAULT_TOPOLOGY_NAME, context_id=context_id)))
 
 ###########################
 # Tests Implementation of Kpi Manager
 ###########################
 
 # ---------- 3rd Iteration Tests ----------------
-# def test_SetKpiDescriptor(kpi_manager_client):
-#     LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ")
-#     response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
-#     LOGGER.info("Response gRPC message object: {:}".format(response))
-#     assert isinstance(response, KpiId)
+def test_SetKpiDescriptor(kpi_manager_client):
+    LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ")
+    response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+    LOGGER.info("Response gRPC message object: {:}".format(response))
+    assert isinstance(response, KpiId)
 
-# def test_DeleteKpiDescriptor(kpi_manager_client):
-#     LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ")
-#     # adding KPI
-#     response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
-#     # deleting KPI
-#     del_response = kpi_manager_client.DeleteKpiDescriptor(response_id)
-#     # select KPI
-#     kpi_manager_client.GetKpiDescriptor(response_id)
-#     LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response))
-#     assert isinstance(del_response, Empty)
+def test_DeleteKpiDescriptor(kpi_manager_client):
+    LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ")
+    # adding KPI
+    response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+    # deleting KPI
+    del_response = kpi_manager_client.DeleteKpiDescriptor(response_id)
+    # select KPI
+    kpi_manager_client.GetKpiDescriptor(response_id)
+    LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response))
+    assert isinstance(del_response, Empty)
 
 def test_GetKpiDescriptor(kpi_manager_client):
     LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ")
@@ -225,77 +130,18 @@ def test_GetKpiDescriptor(kpi_manager_client):
 
     assert isinstance(response, KpiDescriptor)
 
-# def test_SelectKpiDescriptor(kpi_manager_client):
-#     LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ")
-#     # adding KPI
-#     kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
-#     # select KPI(s)    
-#     response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request())
-#     LOGGER.info("Response gRPC message object: {:}".format(response))
-#     assert isinstance(response, KpiDescriptorList)
-
-# def test_set_list_of_KPIs(kpi_manager_client):
-#     LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ")
-#     KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"]
-#     # adding KPI
-#     for kpi in KPIs_TO_SEARCH:
-#        kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi))
-    
-
-# ---------- 2nd Iteration Tests -----------------
-# def test_SetKpiDescriptor(kpi_manager_client):
-#     LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ")
-#     with open("kpi_manager/tests/KPI_configs.json", 'r') as file:
-#         data = json.load(file)
-#         _descriptors = data.get('KPIs', [])
-#     for _descritor_name in _descriptors:
-#         response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(_descritor_name))
-#         LOGGER.info("Response gRPC message object: {:}".format(response))
-#     assert isinstance(response, KpiId)
-
-# def test_GetKpiDescriptor(kpi_manager_client):
-#     LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ")
-#     response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request())
-#     LOGGER.info("Response gRPC message object: {:}".format(response))
-#     assert isinstance(response, KpiDescriptor)
-
-# def test_DeleteKpiDescriptor(kpi_manager_client):
-#     LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ")
-#     response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
-#     del_response = kpi_manager_client.DeleteKpiDescriptor(response)
-#     kpi_manager_client.GetKpiDescriptor(response)
-#     LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response))
-#     assert isinstance(del_response, Empty)
-
-# def test_SelectKpiDescriptor(kpi_manager_client):
-#     LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ")
-#     kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a())
-#     response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request_a())
-#     LOGGER.info("Response gRPC message object: {:}".format(response))
-#     assert isinstance(response, KpiDescriptorList)
-
-# ------------- INITIAL TESTs ----------------
-# Test case that makes use of client fixture to test server's CreateKpi method
-# def test_set_kpi(kpi_manager_client): # pylint: disable=redefined-outer-name
-#     # make call to server
-#     LOGGER.warning('test_create_kpi requesting')
-#     for i in range(3):
-#         response = kpi_manager_client.SetKpiDescriptor(create_kpi_request(str(i+1)))
-#         LOGGER.debug(str(response))
-#         assert isinstance(response, KpiId)
-
-# # Test case that makes use of client fixture to test server's DeleteKpi method
-# def test_delete_kpi(kpi_manager_client): # pylint: disable=redefined-outer-name
-#     # make call to server
-#     LOGGER.warning('delete_kpi requesting')
-#     response = kpi_manager_client.SetKpiDescriptor(create_kpi_request('4'))
-#     response = kpi_manager_client.DeleteKpiDescriptor(response)
-#     LOGGER.debug(str(response))
-#     assert isinstance(response, Empty)
+def test_SelectKpiDescriptor(kpi_manager_client):
+    LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ")
+    # adding KPI
+    kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+    # select KPI(s)    
+    response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request())
+    LOGGER.info("Response gRPC message object: {:}".format(response))
+    assert isinstance(response, KpiDescriptorList)
 
-# # Test case that makes use of client fixture to test server's GetKpiDescriptor method
-# def test_select_kpi_descriptor(kpi_manager_client): # pylint: disable=redefined-outer-name
-#     LOGGER.warning('test_selectkpidescritor begin')
-#     response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request())
-#     LOGGER.debug(str(response))
-#     assert isinstance(response, KpiDescriptorList)
+def test_set_list_of_KPIs(kpi_manager_client):
+    LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ")
+    KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"]
+    # adding KPI
+    for kpi in KPIs_TO_SEARCH:
+       kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi))
diff --git a/src/kpi_value_api/.gitlab-ci.yml b/src/kpi_value_api/.gitlab-ci.yml
index 166e9d3cbcf3eb09c914384a9906853dddd7bfb5..1a6f821ba9e798bb4220d914109ab3a65f0f1792 100644
--- a/src/kpi_value_api/.gitlab-ci.yml
+++ b/src/kpi_value_api/.gitlab-ci.yml
@@ -50,10 +50,30 @@ unit_test kpi-value-api:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
     - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
     - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+    - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi
+    - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi
     - docker container prune -f
   script:
     - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
-    - docker run --name $IMAGE_NAME -d -p 30020:30020 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - docker pull "bitnami/zookeeper:latest"
+    - docker pull "bitnami/kafka:latest"
+    - >
+      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      bitnami/zookeeper:latest
+    - sleep 10 # Wait for Zookeeper to start
+    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+      --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
+      --env ALLOW_PLAINTEXT_LISTENER=yes
+      bitnami/kafka:latest
+    - sleep 20 # Wait for Kafka to start
+    - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $KAFKA_IP
+    - > 
+      docker run --name $IMAGE_NAME -d -p 30020:30020
+      --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092"
+      --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" 
+      --network=teraflowbridge 
+      $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
     - sleep 5
     - docker ps -a
     - docker logs $IMAGE_NAME
@@ -74,7 +94,7 @@ unit_test kpi-value-api:
       - src/$IMAGE_NAME/**/*.{py,in,yml}
       - src/$IMAGE_NAME/Dockerfile
       - src/$IMAGE_NAME/tests/*.py
-      - src/$IMAGE_NAME/tests/Dockerfile
+      # - src/$IMAGE_NAME/tests/Dockerfile  # maybe not needed
       - manifests/${IMAGE_NAME}service.yaml
       - .gitlab-ci.yml
   artifacts:
diff --git a/src/kpi_value_api/Dockerfile b/src/kpi_value_api/Dockerfile
index 7dd8d307b8338c4a29e97c742ca12a49c4611e0a..25b8da931f88000dd229c536456a3eb1fa7f56db 100644
--- a/src/kpi_value_api/Dockerfile
+++ b/src/kpi_value_api/Dockerfile
@@ -63,6 +63,8 @@ RUN python3 -m pip install -r requirements.txt
 # Add component files into working directory
 WORKDIR /var/teraflow
 COPY src/kpi_value_api/. kpi_value_api/
+COPY src/kpi_manager/__init__.py kpi_manager/__init__.py
+COPY src/kpi_manager/client/. kpi_manager/client/
 
 # Start the service
 ENTRYPOINT ["python", "-m", "kpi_value_api.service"]
diff --git a/src/kpi_value_api/README.md b/src/kpi_value_api/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..70ba2c5e79c79147e336307ecc6d5ddfc263df90
--- /dev/null
+++ b/src/kpi_value_api/README.md
@@ -0,0 +1,23 @@
+# How to locally run and test KPI Value API micro-service
+
+### Prerequisites
+Ensure the following requirements are met before executing the KPI Value API service.
+
+1. The KPI Manager service and Apache Kafka are running.
+
+2. A virtual environment exists with all the required packages listed in the ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/requirements.in) file successfully installed.
+
+3. Call the ["create_all_topics()"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/common/tools/kafka/Variables.py) function to verify the existence of all required topics on kafka. 
+
+### Messages format templates
+The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/tests/messages.py) python file contains templates for creating gRPC messages.
+
+### Unit test file
+The ["KPI Value API test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_api/tests/test_kpi_value_api.py) python file enlist various tests conducted to validate functionality.
+
+### Flow of execution (KPI Value API Service functions)
+1. Call the `create_new_topic_if_not_exists(<list of string>)` method to create any new topics if needed.
+
+2. Call `StoreKpiValues(KpiValueList)` to produce `Kpi Value` on a Kafka Topic. (The `KpiValueWriter` microservice will consume and process the `Kpi Value`)
+
+3. Call `SelectKpiValues(KpiValueFilter) -> KpiValueList` to read metrics from the Prometheus DB.
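+
+A minimal sketch of steps 1 and 2 is shown below. The Kafka topic helper and the test message
+template are the ones referenced above; the client import is an assumption (a gRPC client class
+following the same pattern as the other micro-services) and may differ in your checkout.
+
+```python
+from common.tools.kafka.Variables import KafkaTopic
+from kpi_value_api.tests.messages import create_kpi_value_list
+# Assumed client class, analogous to the clients of the other micro-services:
+from kpi_value_api.client.KpiValueApiClient import KpiValueApiClient
+
+# KFK_NAMESPACE and KFK_SERVER_PORT must be set so the Kafka address can be resolved.
+KafkaTopic.create_all_topics()                    # ensure the required Kafka topics exist
+
+client = KpiValueApiClient()
+client.StoreKpiValues(create_kpi_value_list())    # produce the KPI values on the Kafka topic
+```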
diff --git a/src/kpi_value_api/requirements.in b/src/kpi_value_api/requirements.in
index 7e4694109dc4e1d31b86abfc03162494faafcdaf..f5695906a8d02d55e15960a76986b8d03f02dba1 100644
--- a/src/kpi_value_api/requirements.in
+++ b/src/kpi_value_api/requirements.in
@@ -14,3 +14,4 @@
 
 confluent-kafka==2.3.*
 requests==2.27.*
+prometheus-api-client==0.5.3
\ No newline at end of file
diff --git a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py
index d27de54f3cddfd0d70d656a89c45adc50e518289..4ea978fafc8d7454d41f64182d553d030215113a 100644
--- a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py
+++ b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py
@@ -12,98 +12,141 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, grpc, requests
-from typing import Tuple, Any
-from datetime import datetime
+import logging, grpc, json
+from typing import Dict
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
 
 from common.proto.context_pb2 import Empty
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId
 from common.proto.kpi_value_api_pb2_grpc import KpiValueAPIServiceServicer
 from common.proto.kpi_value_api_pb2 import KpiValueList, KpiValueFilter, KpiValue, KpiValueType
 
 from confluent_kafka import Producer as KafkaProducer
 
+from prometheus_api_client import PrometheusConnect
+from prometheus_api_client.utils import parse_datetime
+
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
 
 LOGGER       = logging.getLogger(__name__)
 METRICS_POOL = MetricsPool('KpiValueAPI', 'NBIgRPC')
-PROM_URL     = "http://localhost:9090"
+PROM_URL     = "http://prometheus-k8s.monitoring.svc.cluster.local:9090"    # TODO: updated with the env variables
 
 class KpiValueApiServiceServicerImpl(KpiValueAPIServiceServicer):
     def __init__(self):
         LOGGER.debug('Init KpiValueApiService')
-    
+        self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()})
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def StoreKpiValues(self, request: KpiValueList, grpc_context: grpc.ServicerContext
                        ) -> Empty:
         LOGGER.debug('StoreKpiValues: Received gRPC message object: {:}'.format(request))
-        producer_obj = KafkaProducer({
-            'bootstrap.servers' : KafkaConfig.SERVER_IP.value    
-        })
+
+        producer = self.kafka_producer
         for kpi_value in request.kpi_value_list:
-            kpi_value_to_produce : Tuple [str, Any, Any] = (
-                kpi_value.kpi_id.kpi_id,            
-                kpi_value.timestamp,                
-                kpi_value.kpi_value_type            # kpi_value.kpi_value_type.(many options) how?
-            )
+            kpi_value_to_produce : Dict  = {
+                "kpi_uuid"       : kpi_value.kpi_id.kpi_id.uuid,            
+                "timestamp"      : kpi_value.timestamp.timestamp,                
+                "kpi_value_type" : self.ExtractKpiValueByType(kpi_value.kpi_value_type)       
+            }
             LOGGER.debug('KPI to produce is {:}'.format(kpi_value_to_produce))
             msg_key = "gRPC-kpivalueapi"        # str(__class__.__name__) can be used
         
-            producer_obj.produce(
+            producer.produce(
                 KafkaTopic.VALUE.value, 
                 key      = msg_key,
-                value    = kpi_value.SerializeToString(),      # value = json.dumps(kpi_value_to_produce),
+                value    = json.dumps(kpi_value_to_produce),
                 callback = self.delivery_callback
             )
-            producer_obj.flush()
+            producer.flush()
         return Empty()
 
+    def ExtractKpiValueByType(self, value):
+        attributes = [ 'floatVal' , 'int32Val' , 'uint32Val','int64Val', 
+                       'uint64Val', 'stringVal', 'boolVal']
+        for attr in attributes:
+            try:
+                # getattr() on an unset oneof member silently returns its default value,
+                # so check which member is actually set before reading it.
+                if value.HasField(attr):
+                    return getattr(value, attr)
+            except (ValueError, TypeError, AttributeError):
+                continue
+        return None
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SelectKpiValues(self, request: KpiValueFilter, grpc_context: grpc.ServicerContext
                         ) -> KpiValueList:
         LOGGER.debug('StoreKpiValues: Received gRPC message object: {:}'.format(request))
         response = KpiValueList()
-        metrics          = [kpi.kpi_id for kpi in request.kpi_id]
-        start_timestamps = [timestamp for timestamp in request.start_timestamp]
-        end_timestamps   = [timestamp for timestamp in request.end_timestamp]
-        results = []
+        
+        kpi_manager_client = KpiManagerClient()
+        prom_connect       = PrometheusConnect(url=PROM_URL)
 
-        for start, end in zip(start_timestamps, end_timestamps):
-            start_str = datetime.fromtimestamp(start.seconds).isoformat() + "Z"
-            end_str = datetime.fromtimestamp(end.seconds).isoformat() + "Z"
+        metrics          = [self.GetKpiSampleType(kpi, kpi_manager_client) for kpi       in request.kpi_id]
+        start_timestamps = [parse_datetime(timestamp)                      for timestamp in request.start_timestamp]
+        end_timestamps   = [parse_datetime(timestamp)                      for timestamp in request.end_timestamp]
 
+        prom_response = []
+        for start_time, end_time in zip(start_timestamps, end_timestamps):
             for metric in metrics:
-                url    = f'{PROM_URL}/api/v1/query_range'
-                params = {
-                    'query': metric,
-                    'start': start_str,
-                    'end'  : end_str,
-                    'step' : '30s'           # or any other step you need
-                }
-                response = requests.get(url, params=params)
-                if response.status_code == 200:
-                    data = response.json()
-                    for result in data['data']['result']:
-                        for value in result['values']:
-                            kpi_value = KpiValue(
-                                kpi_id=metric,
-                                timestamp=str(seconds=value[0]),
-                                kpi_value_type=self._convert_value_to_kpi_value_type(value[1])
-                            )
-                            results.append(kpi_value)
-
-    def _convert_value_to_kpi_value_type(self, value):
+                print(start_time, end_time, metric)
+                LOGGER.debug(">>> Query: {:}".format(metric))
+                prom_response.append(
+                    prom_connect.custom_query_range(
+                    query      = metric,        # this is the metric name and label config
+                    start_time = start_time,
+                    end_time   = end_time,
+                    step       = 30,            # or any other step value (missing in gRPC Filter request)
+                    )
+                )
+        
+        for single_resposne in prom_response:
+            # print ("{:}".format(single_resposne))
+            for record in single_resposne:
+                # print("Record >>> kpi: {:} >>> time & values set: {:}".format(record['metric']['__name__'], record['values']))
+                for value in record['values']:
+                    # print("{:} - {:}".format(record['metric']['__name__'], value))
+                    kpi_value = KpiValue()
+                    kpi_value.kpi_id.kpi_id.uuid  = record['metric']['__name__']
+                    kpi_value.timestamp.timestamp = float(value[0])
+                    kpi_value.kpi_value_type.CopyFrom(self.ConverValueToKpiValueType(value[1]))
+                    response.kpi_value_list.append(kpi_value)
+        return response
+    
+    def GetKpiSampleType(self, kpi_value: str, kpi_manager_client):
+        print("--- START -----")
+
+        kpi_id = KpiId()
+        kpi_id.kpi_id.uuid = kpi_value.kpi_id.kpi_id.uuid
+        # print("KpiId generated: {:}".format(kpi_id))
+
+        try:
+            kpi_descriptor_object = KpiDescriptor()
+            kpi_descriptor_object = kpi_manager_client.GetKpiDescriptor(kpi_id)
+            # TODO: why does kpi_descriptor_object receive a KpiDescriptor object and not an Empty object?
+            if kpi_descriptor_object.kpi_id.kpi_id.uuid == kpi_id.kpi_id.uuid:
+                LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
+                print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
+                return KpiSampleType.Name(kpi_descriptor_object.kpi_sample_type)    # extract and return the name of KpiSampleType
+            else:
+                LOGGER.info("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id))
+                print("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id))
+        except Exception as e:
+            LOGGER.info("Unable to get KpiDescriptor. Error: {:}".format(e))
+            print ("Unable to get KpiDescriptor. Error: {:}".format(e))
+
+    def ConverValueToKpiValueType(self, value):
         # Check if the value is an integer (int64)
         try:
-            int64_value = int(value)
-            return KpiValueType(int64Val=int64_value)
-        except ValueError:
+            int_value = int(value)
+            return KpiValueType(int64Val=int_value)
+        except (ValueError, TypeError):
             pass
         # Check if the value is a float
         try:
             float_value = float(value)
             return KpiValueType(floatVal=float_value)
-        except ValueError:
+        except (ValueError, TypeError):
             pass
         # Check if the value is a boolean
         if value.lower() in ['true', 'false']:
@@ -112,7 +155,6 @@ class KpiValueApiServiceServicerImpl(KpiValueAPIServiceServicer):
         # If none of the above, treat it as a string
         return KpiValueType(stringVal=value)
 
-
     def delivery_callback(self, err, msg):
         if err: LOGGER.debug('Message delivery failed: {:}'.format(err))
         else:   LOGGER.debug('Message delivered to topic {:}'.format(msg.topic()))
diff --git a/src/kpi_value_api/tests/messages.py b/src/kpi_value_api/tests/messages.py
index c2a1cbb0b275fb26d6498e4470f3869a105a8d36..d8ad14bd44eebc1e9412cfd5ff2973e6018c95e9 100644
--- a/src/kpi_value_api/tests/messages.py
+++ b/src/kpi_value_api/tests/messages.py
@@ -18,8 +18,9 @@ from common.proto.kpi_value_api_pb2 import KpiValue, KpiValueList
 
 def create_kpi_value_list():
     _create_kpi_value_list = KpiValueList()
-    # To run this experiment sucessfully, already existing UUID in KPI DB in necessary.
-    # because the UUID is used to get the descriptor form KPI DB.
+    # To run this experiment successfully, add an existing UUID of a KPI Descriptor from the KPI DB.
+    # This UUID is used to get the descriptor from the KPI DB. If the KPI ID does not exist,
+    # some part of the code won't execute.
     EXISTING_KPI_IDs = ["725ce3ad-ac67-4373-bd35-8cd9d6a86e09",
                         str(uuid.uuid4()), 
                         str(uuid.uuid4())]
diff --git a/src/kpi_value_writer/.gitlab-ci.yml b/src/kpi_value_writer/.gitlab-ci.yml
index 25619ce7f8b4346172587dbf2e804896aff20e4d..9a2f9fd47e435b26e2e3a335bd9b95da58a0517f 100644
--- a/src/kpi_value_writer/.gitlab-ci.yml
+++ b/src/kpi_value_writer/.gitlab-ci.yml
@@ -50,10 +50,30 @@ unit_test kpi-value-writer:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
     - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
     - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+    - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi
+    - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi
     - docker container prune -f
   script:
     - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
-    - docker run --name $IMAGE_NAME -d -p 30030:30030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - docker pull "bitnami/zookeeper:latest"
+    - docker pull "bitnami/kafka:latest"
+    - >
+      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      bitnami/zookeeper:latest
+    - sleep 10 # Wait for Zookeeper to start
+    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+      --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
+      --env ALLOW_PLAINTEXT_LISTENER=yes
+      bitnami/kafka:latest
+    - sleep 20 # Wait for Kafka to start
+    - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $KAFKA_IP
+    - >
+      docker run --name $IMAGE_NAME -d -p 30030:30030
+      --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092"
+      --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results"
+      --network=teraflowbridge 
+      $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
     - sleep 5
     - docker ps -a
     - docker logs $IMAGE_NAME
@@ -64,6 +84,8 @@ unit_test kpi-value-writer:
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
     - docker rm -f $IMAGE_NAME
+    - docker rm -f zookeeper
+    - docker rm -f kafka
     - docker network rm teraflowbridge
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
diff --git a/src/kpi_value_writer/README.md b/src/kpi_value_writer/README.md
index 72ba6e5594adeef4a29d650615716c26273ed115..c45a0e39534fae9efef4174d5ca5be7047845c48 100644
--- a/src/kpi_value_writer/README.md
+++ b/src/kpi_value_writer/README.md
@@ -1,29 +1,17 @@
-# How to locally run and test KPI manager micro-service
+# How to locally run and test the KPI Value Writer micro-service
 
-## --- File links need to be updated. ---
 ### Pre-requisets 
-The following requirements should be fulfilled before the execuation of KPI management service.
+Ensure the following requirements are met before executing the KPI Value Writer service:
 
-1. Verify that [kpi_management.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/kpi_management.proto) file exists and grpcs file are generated sucessfully. 
-2. Virtual enviornment exist with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/requirements.in) are installed sucessfully.
-3. Verify the creation of required database and table.
-[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/database/tests/KpiDBtests.py) python file enlist the functions to create tables and database and
-[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/service/database/KpiEngine.py) contains the DB string, update the string as per your deployment.
+1. The KPI Manager and KPI Value API services are running and Apache Kafka is running.
 
-### Messages format templates
-["Messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_messages.py) python file enlist the basic gRPC messages format used during the testing.
-
-### Test file
-["KPI management test"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_kpi_manager.py) python file enlist different tests conducted during the experiment.
-
-### Flow of execution (Kpi Maanager Service functions)
-1. Call the `create_database()` and `create_tables()` functions from `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables` to verify the existence of KPI table.
+2. A virtual environment exists with all the required packages listed in the ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/requirements.in) file installed successfully.
 
-2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor in `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
-
-3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from DB.
+### Messages format templates
+The ["messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/tests/test_messages.py) python file contains the templates to create gRPC messages.
 
-4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that matches the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types.
+### Unit test file
+The ["KPI Value API test"](https://labs.etsi.org/rep/tfs/controller/-/blob/develop/src/kpi_value_writer/tests/test_kpi_value_writer.py) python file enlist various tests conducted to validate functionality.
 
-## For KPI composer and KPI writer
-The functionalities of KPI composer and writer is heavily dependent upon Telemetery service. Therfore, these services has other pre-requsites that are mention [here](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/requirements.in).
\ No newline at end of file
+### Flow of execution
+1. Call the `RunKafkaConsumer` method of the `KpiValueWriter` class to start consuming the `KPI Value` messages generated by the `KPI Value API` or `Telemetry`. For every valid `KPI Value` consumed from Kafka, it invokes the `MetricWriterToPrometheus` class to prepare and expose the metric to Prometheus.
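For reference, a minimal local smoke test of the flow described above might look as follows. This is a sketch only: it assumes Kafka is reachable at the address returned by `KafkaConfig.get_kafka_address()` and that the KPI Manager and KPI Value API services are already running.

```python
# Minimal sketch (not part of the patch): start the KPI Value Writer consumer loop locally.
from kpi_value_writer.service.KpiValueWriter import KpiValueWriter

writer = KpiValueWriter()
writer.RunKafkaConsumer()   # spawns a background thread that polls the VALUE Kafka topic
```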
diff --git a/src/kpi_value_writer/service/KpiValueWriter.py b/src/kpi_value_writer/service/KpiValueWriter.py
index 26bab44657606b1f3edc14659d128c5ccc7a6890..8b258a1424cc44be4dcb9134ee913c707cc44bfa 100644
--- a/src/kpi_value_writer/service/KpiValueWriter.py
+++ b/src/kpi_value_writer/service/KpiValueWriter.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import json
 import logging
 import threading
 from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
@@ -33,32 +34,30 @@ from .MetricWriterToPrometheus import MetricWriterToPrometheus
 
 LOGGER           = logging.getLogger(__name__)
 ACTIVE_CONSUMERS = []
-METRIC_WRITER    = MetricWriterToPrometheus()
 
 class KpiValueWriter(GenericGrpcService):
     def __init__(self, cls_name : str = __name__) -> None:
         port = get_service_port_grpc(ServiceNameEnum.KPIVALUEWRITER)
         super().__init__(port, cls_name=cls_name)
+        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(),
+                                            'group.id'           : 'KpiValueWriter',
+                                            'auto.offset.reset'  : 'latest'})
 
-    @staticmethod
-    def RunKafkaConsumer():
-        thread = threading.Thread(target=KpiValueWriter.KafkaConsumer, args=())
+    def RunKafkaConsumer(self):
+        thread = threading.Thread(target=self.KafkaKpiConsumer, args=())
         ACTIVE_CONSUMERS.append(thread)
         thread.start()
 
-    @staticmethod
-    def KafkaConsumer():
-        kafka_consumer  = KafkaConsumer(
-            { 'bootstrap.servers' : KafkaConfig.SERVER_IP.value,
-              'group.id'          : __class__,
-              'auto.offset.reset' : 'latest'}
-        )
+    def KafkaKpiConsumer(self):
         kpi_manager_client = KpiManagerClient()
-        kafka_consumer.subscribe([KafkaTopic.VALUE.value])
+        metric_writer      = MetricWriterToPrometheus()
+
+        consumer = self.kafka_consumer
+        consumer.subscribe([KafkaTopic.VALUE.value])
         LOGGER.debug("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value))
         print("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value))
         while True:
-            raw_kpi = kafka_consumer.poll(1.0)
+            raw_kpi = consumer.poll(1.0)
             if raw_kpi is None:
                 continue
             elif raw_kpi.error():
@@ -68,33 +67,29 @@ class KpiValueWriter(GenericGrpcService):
                     print("Consumer error: {}".format(raw_kpi.error()))
                     continue
             try:
-                kpi_value = KpiValue()
-                kpi_value.ParseFromString(raw_kpi.value())
+                kpi_value = json.loads(raw_kpi.value().decode('utf-8'))
                 LOGGER.info("Received KPI : {:}".format(kpi_value))
                 print("Received KPI : {:}".format(kpi_value))
-                KpiValueWriter.get_kpi_descriptor(kpi_value, kpi_manager_client)
+                self.get_kpi_descriptor(kpi_value, kpi_manager_client, metric_writer)
             except Exception as e:
                 print("Error detail: {:}".format(e))
                 continue
 
-    @staticmethod
-    def get_kpi_descriptor(kpi_value: str, kpi_manager_client ):
+    def get_kpi_descriptor(self, kpi_value: str, kpi_manager_client, metric_writer):
         print("--- START -----")
 
         kpi_id = KpiId()
-        kpi_id.kpi_id.uuid = kpi_value.kpi_id.kpi_id.uuid
+        kpi_id.kpi_id.uuid = kpi_value['kpi_uuid']
         print("KpiId generated: {:}".format(kpi_id))
         # print("Kpi manger client created: {:}".format(kpi_manager_client))
-
         try:
             kpi_descriptor_object = KpiDescriptor()
             kpi_descriptor_object = kpi_manager_client.GetKpiDescriptor(kpi_id)
+            # TODO: why does kpi_descriptor_object receive a KpiDescriptor object rather than an Empty object?
             if kpi_descriptor_object.kpi_id.kpi_id.uuid == kpi_id.kpi_id.uuid:
-            # print("kpi descriptor received: {:}".format(kpi_descriptor_object))
-            # if isinstance (kpi_descriptor_object, KpiDescriptor):
                 LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
                 print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
-                METRIC_WRITER.create_and_expose_cooked_kpi(kpi_descriptor_object, kpi_value)
+                metric_writer.create_and_expose_cooked_kpi(kpi_descriptor_object, kpi_value)
             else:
                 LOGGER.info("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id))
                 print("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id))
diff --git a/src/kpi_value_writer/service/MetricWriterToPrometheus.py b/src/kpi_value_writer/service/MetricWriterToPrometheus.py
index b681164786bd310d457998bae55b836522888b94..85e618a4b5b330cb83cf255652e7be8dff2dabd3 100644
--- a/src/kpi_value_writer/service/MetricWriterToPrometheus.py
+++ b/src/kpi_value_writer/service/MetricWriterToPrometheus.py
@@ -14,11 +14,9 @@
 
 # read Kafka stream from Kafka topic
 
-import ast
-import time
-import threading
 import logging
-from prometheus_client import start_http_server, Gauge, CollectorRegistry
+from typing import Dict
+from prometheus_client import Gauge
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 
 from common.proto.kpi_value_api_pb2 import KpiValue
@@ -26,7 +24,6 @@ from common.proto.kpi_manager_pb2 import KpiDescriptor
 
 LOGGER         = logging.getLogger(__name__)
 PROM_METRICS   = {}
-PROM_REGISTERY = CollectorRegistry()
 
 class MetricWriterToPrometheus:
     '''
@@ -34,13 +31,7 @@ class MetricWriterToPrometheus:
     cooked KPI value = KpiDescriptor (gRPC message) + KpiValue (gRPC message)
     '''
     def __init__(self):
-        # prometheus server address and configs
-        self.start_prometheus_client()
         pass
-    
-    def start_prometheus_client(self):
-        start_http_server(10808, registry=PROM_REGISTERY)
-        LOGGER.debug("Prometheus client is started on port 10808")
 
     def merge_kpi_descriptor_and_kpi_value(self, kpi_descriptor, kpi_value):
             # Creating a dictionary from the kpi_descriptor's attributes
@@ -54,25 +45,24 @@ class MetricWriterToPrometheus:
                 'slice_id'       : kpi_descriptor.slice_id.slice_uuid.uuid,
                 'connection_id'  : kpi_descriptor.connection_id.connection_uuid.uuid,
                 'link_id'        : kpi_descriptor.link_id.link_uuid.uuid,
-                'time_stamp'      : kpi_value.timestamp.timestamp,
-                'kpi_value'      : kpi_value.kpi_value_type.floatVal
+                'time_stamp'     : kpi_value['timestamp'],
+                'kpi_value'      : kpi_value['kpi_value_type']
             }
             # LOGGER.debug("Cooked Kpi: {:}".format(cooked_kpi))
             return cooked_kpi
 
-    def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: KpiValue):
+    def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: Dict):
         # merge both gRPC messages into single varible.
         cooked_kpi = self.merge_kpi_descriptor_and_kpi_value(kpi_descriptor, kpi_value)
-        tags_to_exclude = {'kpi_description', 'kpi_sample_type', 'kpi_value'} # extracted values will be used as metric tag
-        metric_tags = [tag for tag in cooked_kpi.keys() if tag not in tags_to_exclude]
+        tags_to_exclude = {'kpi_description', 'kpi_sample_type', 'kpi_value'}           
+        metric_tags = [tag for tag in cooked_kpi.keys() if tag not in tags_to_exclude]  # These values will be used as metric tags
         metric_name = cooked_kpi['kpi_sample_type']
         try:
             if metric_name not in PROM_METRICS:     # Only register the metric, when it doesn't exists
                 PROM_METRICS[metric_name] = Gauge ( 
                     metric_name,
                     cooked_kpi['kpi_description'],
-                    metric_tags,
-                    registry=PROM_REGISTERY
+                    metric_tags
                 )
             LOGGER.debug("Metric is created with labels: {:}".format(metric_tags))
             PROM_METRICS[metric_name].labels(
@@ -84,7 +74,7 @@ class MetricWriterToPrometheus:
                     connection_id   = cooked_kpi['connection_id'],
                     link_id         = cooked_kpi['link_id'],
                     time_stamp      = cooked_kpi['time_stamp'],
-                ).set(float(cooked_kpi['kpi_value']))
+                ).set(cooked_kpi['kpi_value'])
             LOGGER.debug("Metric pushed to the endpoints: {:}".format(PROM_METRICS[metric_name]))
 
         except ValueError as e:
@@ -93,4 +83,5 @@ class MetricWriterToPrometheus:
                 print("Metric {:} is already registered. Skipping.".format(metric_name))
             else:
                 LOGGER.error("Error while pushing metric: {}".format(e))
-                raise
\ No newline at end of file
+                raise
+
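Since the explicit `CollectorRegistry` is removed here, the `Gauge` instances fall back to the default registry of `prometheus_client`, which is what `start_http_server()` in `__main__.py` now exposes. A minimal sketch of that pattern follows; the metric and label names are illustrative, not taken from TFS.

```python
# Sketch: label-based Gauge in the default registry, as used by create_and_expose_cooked_kpi().
from prometheus_client import Gauge, generate_latest

EXAMPLE_METRIC = Gauge('example_kpi', 'Example KPI description', ['device_id', 'endpoint_id'])
EXAMPLE_METRIC.labels(device_id='dev-1', endpoint_id='eth0').set(42.0)

# The same exposition-format text is what start_http_server(10808) serves over HTTP.
print(generate_latest().decode('utf-8'))
```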
diff --git a/src/kpi_value_writer/service/__main__.py b/src/kpi_value_writer/service/__main__.py
index aa67540fb899781297d1235dc2e15bcbb2c38585..be9f8f29bfdb2397eedd0ce2821c5da8f778cfc4 100644
--- a/src/kpi_value_writer/service/__main__.py
+++ b/src/kpi_value_writer/service/__main__.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import logging, signal, sys, threading
+from prometheus_client import start_http_server
 from kpi_value_writer.service.KpiValueWriter import KpiValueWriter
 from common.Settings import get_log_level
 
@@ -38,6 +39,8 @@ def main():
     grpc_service = KpiValueWriter()
     grpc_service.start()
 
+    start_http_server(10808)
+    LOGGER.debug("Prometheus client is started on port 10808")
     # Wait for Ctrl+C or termination signal
     while not terminate.wait(timeout=1.0): pass
 
diff --git a/src/kpi_value_writer/tests/test_kpi_value_writer.py b/src/kpi_value_writer/tests/test_kpi_value_writer.py
index 572495d48d70cdc40c0ef6bb1efcf877e2a610ee..b784fae5da713f9bd7cd7a1668f48b080f7a84fa 100755
--- a/src/kpi_value_writer/tests/test_kpi_value_writer.py
+++ b/src/kpi_value_writer/tests/test_kpi_value_writer.py
@@ -14,31 +14,12 @@
 
 import logging
 from kpi_value_writer.service.KpiValueWriter import KpiValueWriter
+
 from common.tools.kafka.Variables import KafkaTopic
-from kpi_manager.client.KpiManagerClient import KpiManagerClient
-from kpi_manager.tests.test_messages import create_kpi_descriptor_request
-from common.proto.kpi_manager_pb2 import KpiDescriptor
-from kpi_value_writer.tests.test_messages import create_kpi_id_request
 
-LOGGER = logging.getLogger(__name__)
 
-# def test_GetKpiDescriptor():
-#     LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ")
-#     kpi_manager_client = KpiManagerClient()
-#     # adding KPI
-#     LOGGER.info(" --->>> calling SetKpiDescriptor ")
-#     response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
-#     # get KPI
-#     LOGGER.info(" --->>> calling GetKpiDescriptor with response ID")
-#     response = kpi_manager_client.GetKpiDescriptor(response_id)
-#     LOGGER.info("Response gRPC message object: {:}".format(response))
-    
-#     LOGGER.info(" --->>> calling GetKpiDescriptor with random ID")
-#     rand_response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request())
-#     LOGGER.info("Response gRPC message object: {:}".format(rand_response))
 
-#     LOGGER.info("\n------------------ TEST FINISHED ---------------------\n")
-#     assert isinstance(response, KpiDescriptor)
+LOGGER = logging.getLogger(__name__)
 
 # -------- Initial Test ----------------
 def test_validate_kafka_topics():
@@ -48,5 +29,5 @@ def test_validate_kafka_topics():
 
 def test_KafkaConsumer():
     LOGGER.debug(" --->>> test_kafka_consumer: START <<<--- ")
-    KpiValueWriter.RunKafkaConsumer()
-
+    kpi_value_writer = KpiValueWriter()
+    kpi_value_writer.RunKafkaConsumer()
diff --git a/src/telemetry/.gitlab-ci.yml b/src/telemetry/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..110a6490d20558c6589550be45b6432e500ba9d6
--- /dev/null
+++ b/src/telemetry/.gitlab-ci.yml
@@ -0,0 +1,203 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build telemetry:
+  variables:
+    IMAGE_NAME: 'telemetry'       # name of the microservice
+    IMAGE_TAG: 'latest'             # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    # This first build tags the resulting builder image to prevent it from being removed by the dangling-image removal command
+    # - docker buildx build -t "${IMAGE_NAME}-backend:${IMAGE_TAG}-builder" --target builder -f ./src/$IMAGE_NAME/backend/Dockerfile .
+    - docker buildx build -t "${IMAGE_NAME}-frontend:$IMAGE_TAG" -f ./src/$IMAGE_NAME/frontend/Dockerfile .
+    - docker buildx build -t "${IMAGE_NAME}-backend:$IMAGE_TAG" -f ./src/$IMAGE_NAME/backend/Dockerfile .
+    - docker tag "${IMAGE_NAME}-frontend:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG"
+    - docker tag "${IMAGE_NAME}-backend:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/.gitlab-ci.yml
+      - src/$IMAGE_NAME/frontend/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/frontend/Dockerfile
+      - src/$IMAGE_NAME/frontend/tests/*.py
+      - src/$IMAGE_NAME/backend/Dockerfile
+      - src/$IMAGE_NAME/backend/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/backend/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit_test telemetry-backend:
+  variables:
+    IMAGE_NAME: 'telemetry' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build telemetry
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi
+    - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi
+    # - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi
+    - if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend container is not in the system"; fi
+    - docker container prune -f
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
+    - docker pull "bitnami/zookeeper:latest"
+    - docker pull "bitnami/kafka:latest"
+    - >
+      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      bitnami/zookeeper:latest
+    - sleep 10 # Wait for Zookeeper to start
+    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+      --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
+      --env ALLOW_PLAINTEXT_LISTENER=yes
+      bitnami/kafka:latest
+    - sleep 20 # Wait for Kafka to start
+    - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $KAFKA_IP    
+    - >
+      docker run --name $IMAGE_NAME-backend -d -p 30060:30060
+      --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092"
+      --volume "$PWD/src/$IMAGE_NAME/backend/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG
+    - docker ps -a
+    - sleep 5
+    - docker logs ${IMAGE_NAME}-backend
+    - >
+      docker exec -i ${IMAGE_NAME}-backend bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-backend_report.xml $IMAGE_NAME/backend/tests/test_*.py"
+    - docker exec -i ${IMAGE_NAME}-backend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
+    - docker rm -f ${IMAGE_NAME}-backend
+    - docker rm -f zookeeper
+    - docker rm -f kafka
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/backend/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/backend/Dockerfile
+      - src/$IMAGE_NAME/backend/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/backend/tests/${IMAGE_NAME}-backend_report.xml
+
+# Apply unit test to the component
+unit_test telemetry-frontend:
+  variables:
+    IMAGE_NAME: 'telemetry' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build telemetry
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
+    - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
+    - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi
+    - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi
+    - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi
+    - docker container prune -f
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG"
+    - docker pull "bitnami/zookeeper:latest"
+    - docker pull "bitnami/kafka:latest"
+    - docker pull "cockroachdb/cockroach:latest-v22.2"
+    - docker volume create crdb
+    - >
+      docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080
+      --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123
+      --volume "crdb:/cockroach/cockroach-data"
+      cockroachdb/cockroach:latest-v22.2 start-single-node
+    - echo "Waiting for initialization..."
+    - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
+    # - docker logs crdb
+    # - docker ps -a
+    - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $CRDB_ADDRESS
+    - >
+      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 \
+      -e ALLOW_ANONYMOUS_LOGIN=yes \
+      bitnami/zookeeper:latest
+    - sleep 10 # Wait for Zookeeper to start
+    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+      --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
+      --env ALLOW_PLAINTEXT_LISTENER=yes
+      bitnami/kafka:latest
+    - sleep 20 # Wait for Kafka to start
+    - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $KAFKA_IP
+    # - docker logs zookeeper
+    # - docker logs kafka
+    - >
+      docker run --name $IMAGE_NAME-frontend -d -p 30050:30050
+      --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
+      --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092"
+      --volume "$PWD/src/$IMAGE_NAME/frontend/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG
+    - docker ps -a
+    - sleep 5
+    - docker logs ${IMAGE_NAME}-frontend
+    - >
+      docker exec -i ${IMAGE_NAME}-frontend bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml $IMAGE_NAME/frontend/tests/test_*.py"
+    - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker volume rm -f crdb
+    - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
+    - docker rm -f ${IMAGE_NAME}-frontend
+    - docker rm -f zookeeper
+    - docker rm -f kafka
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/frontend/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/frontend/Dockerfile
+      - src/$IMAGE_NAME/frontend/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml
\ No newline at end of file
diff --git a/src/telemetry/backend/Dockerfile b/src/telemetry/backend/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..eebfe24ab3ca457b9d05b02a07f4b28d6f196987
--- /dev/null
+++ b/src/telemetry/backend/Dockerfile
@@ -0,0 +1,69 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/telemetry/backend
+WORKDIR /var/teraflow/telemetry/backend
+COPY src/telemetry/backend/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/telemetry/__init__.py telemetry/__init__.py
+COPY src/telemetry/backend/. telemetry/backend/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "telemetry.backend.service"]
diff --git a/src/telemetry/backend/requirements.in b/src/telemetry/backend/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..e6a559be714faa31196206dbbdc53788506369b5
--- /dev/null
+++ b/src/telemetry/backend/requirements.in
@@ -0,0 +1,15 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+confluent-kafka==2.3.*
diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py
index d81be79dbe410ccbf2781816f34735f6bfe5639d..6ab841238f446a2895cd163fab4b7eb05eaa3176 100755
--- a/src/telemetry/backend/service/TelemetryBackendService.py
+++ b/src/telemetry/backend/service/TelemetryBackendService.py
@@ -12,64 +12,52 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import ast
+import json
 import time
 import random
 import logging
-import requests
 import threading
-from typing import Any, Tuple
-from common.proto.context_pb2 import Empty
+from typing import Any, Dict
+# from common.proto.context_pb2 import Empty
 from confluent_kafka import Producer as KafkaProducer
 from confluent_kafka import Consumer as KafkaConsumer
-from confluent_kafka import KafkaException
 from confluent_kafka import KafkaError
-from confluent_kafka.admin import AdminClient, NewTopic
-from common.proto.telemetry_frontend_pb2 import Collector, CollectorId
-from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+from common.method_wrappers.Decorator import MetricsPool
+from common.tools.service.GenericGrpcService import GenericGrpcService
 
 LOGGER             = logging.getLogger(__name__)
-METRICS_POOL       = MetricsPool('Telemetry', 'TelemetryBackend')
-KAFKA_SERVER_IP    = '127.0.0.1:9092'
-# KAFKA_SERVER_IP    = '10.152.183.175:30092'
-ADMIN_KAFKA_CLIENT = AdminClient({'bootstrap.servers': KAFKA_SERVER_IP})
-KAFKA_TOPICS       = {'request' : 'topic_request', 'response': 'topic_response',
-                      'raw'     : 'topic_raw'    , 'labeled' : 'topic_labeled'}
-EXPORTER_ENDPOINT  = "http://10.152.183.2:9100/metrics"
-PRODUCER_CONFIG    = {'bootstrap.servers': KAFKA_SERVER_IP,}
+METRICS_POOL       = MetricsPool('TelemetryBackend', 'backendService')
 
-
-class TelemetryBackendService:
+class TelemetryBackendService(GenericGrpcService):
     """
-    Class to listens for request on Kafka topic, fetches metrics and produces measured values to another Kafka topic.
+    Class that listens for requests on the Kafka topic, fetches the requested metrics from the device,
+    and produces them on both the RESPONSE and VALUE Kafka topics.
     """
-
-    def __init__(self):
+    def __init__(self, cls_name : str = __name__) -> None:
         LOGGER.info('Init TelemetryBackendService')
+        port = get_service_port_grpc(ServiceNameEnum.TELEMETRYBACKEND)
+        super().__init__(port, cls_name=cls_name)
+        self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()})
+        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(),
+                                            'group.id'           : 'backend',
+                                            'auto.offset.reset'  : 'latest'})
         self.running_threads = {}
-    
-    def run_kafka_listener(self)->bool:
-        threading.Thread(target=self.kafka_listener).start()
-        return True        
-    
-    def kafka_listener(self):
+
+    def install_servicers(self):
+        threading.Thread(target=self.RequestListener).start()
+
+    def RequestListener(self):
         """
         listener for requests on Kafka topic.
         """
-        conusmer_configs = {
-            'bootstrap.servers' : KAFKA_SERVER_IP,
-            'group.id'          : 'backend',
-            'auto.offset.reset' : 'latest'
-        }
-        # topic_request = "topic_request"
-        consumerObj = KafkaConsumer(conusmer_configs)
-        # consumerObj.subscribe([topic_request])
-        consumerObj.subscribe([KAFKA_TOPICS['request']])
-
+        consumer = self.kafka_consumer
+        consumer.subscribe([KafkaTopic.REQUEST.value])
         while True:
-            receive_msg = consumerObj.poll(2.0)
+            receive_msg = consumer.poll(2.0)
             if receive_msg is None:
-                # print (time.time(), " - Telemetry backend is listening on Kafka Topic: ", KAFKA_TOPICS['request'])     # added for debugging purposes
                 continue
             elif receive_msg.error():
                 if receive_msg.error().code() == KafkaError._PARTITION_EOF:
@@ -77,177 +65,175 @@ class TelemetryBackendService:
                 else:
                     print("Consumer error: {}".format(receive_msg.error()))
                     break
-            (kpi_id, duration, interval) = ast.literal_eval(receive_msg.value().decode('utf-8'))
+            
+            collector = json.loads(receive_msg.value().decode('utf-8'))
             collector_id = receive_msg.key().decode('utf-8')
-            if duration == -1 and interval == -1:
-                self.terminate_collector_backend(collector_id)
-                # threading.Thread(target=self.terminate_collector_backend, args=(collector_id))
+            LOGGER.debug('Received Collector: {:} - {:}'.format(collector_id, collector))
+            print('Received Collector: {:} - {:}'.format(collector_id, collector))
+
+            if collector['duration'] == -1 and collector['interval'] == -1:
+                self.TerminateCollectorBackend(collector_id)
             else:
-                self.run_initiate_collector_backend(collector_id, kpi_id, duration, interval)
+                self.RunInitiateCollectorBackend(collector_id, collector)
 
+    def TerminateCollectorBackend(self, collector_id):
+        if collector_id in self.running_threads:
+            thread, stop_event = self.running_threads[collector_id]
+            stop_event.set()
+            thread.join()
+            print ("Terminating backend (by StopCollector): Collector Id: ", collector_id)
+            del self.running_threads[collector_id]
+            self.GenerateCollectorResponse(collector_id, "-1", -1)          # Termination confirmation to frontend.
+        else:
+            print ('Backend collector {:} not found'.format(collector_id))
 
-    def run_initiate_collector_backend(self, collector_id: str, kpi_id: str, duration: int, interval: int):
+    def RunInitiateCollectorBackend(self, collector_id: str, collector: Dict):
         stop_event = threading.Event()
-        thread = threading.Thread(target=self.initiate_collector_backend, 
-                                  args=(collector_id, kpi_id, duration, interval, stop_event))
+        thread = threading.Thread(target=self.InitiateCollectorBackend, 
+                                  args=(collector_id, collector, stop_event))
         self.running_threads[collector_id] = (thread, stop_event)
         thread.start()
 
-    def initiate_collector_backend(self, collector_id, kpi_id, duration, interval, stop_event
-                        ): # type: ignore
+    def InitiateCollectorBackend(self, collector_id, collector, stop_event):
         """
-        Method to receive collector request attribues and initiates collecter backend.
+        Method receives a collector request and initiates the collector backend.
         """
         print("Initiating backend for collector: ", collector_id)
         start_time = time.time()
         while not stop_event.is_set():
-            if time.time() - start_time >= duration:            # condition to terminate backend
+            if time.time() - start_time >= collector['duration']:            # condition to terminate backend
                 print("Execuation duration completed: Terminating backend: Collector Id: ", collector_id, " - ", time.time() - start_time)
-                self.generate_kafka_response(collector_id, "-1", -1)
-                # write to Kafka to send the termination confirmation.
+                self.GenerateCollectorResponse(collector_id, "-1", -1)       # Termination confirmation to frontend.
                 break
-            # print ("Received KPI: ", kpi_id, ", Duration: ", duration, ", Fetch Interval: ", interval)
-            self.extract_kpi_value(collector_id, kpi_id)
-            # print ("Telemetry Backend running for KPI: ", kpi_id, "after FETCH INTERVAL: ", interval)
-            time.sleep(interval)
+            self.ExtractKpiValue(collector_id, collector['kpi_id'])
+            time.sleep(collector['interval'])
 
-    def extract_kpi_value(self, collector_id: str, kpi_id: str):
+    def ExtractKpiValue(self, collector_id: str, kpi_id: str):
         """
         Method to extract kpi value.
         """
-        measured_kpi_value = random.randint(1,100)                  # Should be extracted from exporter/stream
-        # measured_kpi_value = self.fetch_node_exporter_metrics()     # exporter extracted metric value against default KPI
-        self.generate_kafka_response(collector_id, kpi_id , measured_kpi_value)
+        measured_kpi_value = random.randint(1,100)                      # TODO: To be extracted from a device
+        print ("Measured Kpi value: {:}".format(measured_kpi_value))
+        # measured_kpi_value = self.fetch_node_exporter_metrics()       # exporter extracted metric value against default KPI
+        self.GenerateCollectorResponse(collector_id, kpi_id , measured_kpi_value)
 
-    def generate_kafka_response(self, collector_id: str, kpi_id: str, kpi_value: Any):
+    def GenerateCollectorResponse(self, collector_id: str, kpi_id: str, measured_kpi_value: Any):
         """
-        Method to write response on Kafka topic
+        Method to write the KPI value on the RESPONSE Kafka topic
         """
-        # topic_response = "topic_response"
-        msg_value : Tuple [str, Any] = (kpi_id, kpi_value)
-        msg_key    = collector_id
-        producerObj = KafkaProducer(PRODUCER_CONFIG)
-        # producerObj.produce(topic_response, key=msg_key, value= str(msg_value), callback=self.delivery_callback)
-        producerObj.produce(KAFKA_TOPICS['response'], key=msg_key, value= str(msg_value), callback=TelemetryBackendService.delivery_callback)
-        producerObj.flush()
-
-    def terminate_collector_backend(self, collector_id):
-        if collector_id in self.running_threads:
-            thread, stop_event = self.running_threads[collector_id]
-            stop_event.set()
-            thread.join()
-            print ("Terminating backend (by StopCollector): Collector Id: ", collector_id)
-            del self.running_threads[collector_id]
-            self.generate_kafka_response(collector_id, "-1", -1)
+        producer = self.kafka_producer
+        kpi_value : Dict = {
+            "kpi_id"    : kpi_id,
+            "kpi_value" : measured_kpi_value
+        }
+        producer.produce(
+            KafkaTopic.RESPONSE.value,
+            key      = collector_id,
+            value    = json.dumps(kpi_value),
+            callback = self.delivery_callback
+        )
+        producer.flush()
 
-    def create_topic_if_not_exists(self, new_topics: list) -> bool:
-        """
-        Method to create Kafka topic if it does not exist.
-        Args:
-            admin_client (AdminClient): Kafka admin client.
+    def GenerateRawMetric(self, metrics: Any):
         """
-        for topic in new_topics:
-            try:
-                topic_metadata = ADMIN_KAFKA_CLIENT.list_topics(timeout=5)
-                if topic not in topic_metadata.topics:
-                    # If the topic does not exist, create a new topic
-                    print(f"Topic '{topic}' does not exist. Creating...")
-                    LOGGER.warning("Topic {:} does not exist. Creating...".format(topic))
-                    new_topic = NewTopic(topic, num_partitions=1, replication_factor=1)
-                    ADMIN_KAFKA_CLIENT.create_topics([new_topic])
-            except KafkaException as e:
-                print(f"Failed to create topic: {e}")
-                return False
-        return True
-
-    @staticmethod
-    def delivery_callback( err, msg):
-        """
-        Callback function to handle message delivery status.
-        Args:
-            err (KafkaError): Kafka error object.
-            msg (Message): Kafka message object.
+        Method to write raw metrics on the VALUE Kafka topic
         """
-        if err:
-            print(f'Message delivery failed: {err}')
-        else:
-            print(f'Message delivered to topic {msg.topic()}')
+        producer = self.kafka_producer
+        some_metric : Dict = {
+            "some_id"    : metrics
+        }
+        producer.produce(
+            KafkaTopic.VALUE.value,
+            key      = 'raw',
+            value    = json.dumps(some_metric),
+            callback = self.delivery_callback
+        )
+        producer.flush()
 
-# ----------- BELOW: Actual Implementation of Kafka Producer with Node Exporter -----------
-    @staticmethod
-    def fetch_single_node_exporter_metric():
+    def delivery_callback(self, err, msg):
         """
-        Method to fetch metrics from Node Exporter.
-        Returns:
-            str: Metrics fetched from Node Exporter.
-        """
-        KPI = "node_network_receive_packets_total"
-        try:
-            response = requests.get(EXPORTER_ENDPOINT) # type: ignore
-            LOGGER.info("Request status {:}".format(response))
-            if response.status_code == 200:
-                # print(f"Metrics fetched sucessfully...")
-                metrics = response.text
-                # Check if the desired metric is available in the response
-                if KPI in metrics:
-                    KPI_VALUE = TelemetryBackendService.extract_metric_value(metrics, KPI)
-                    # Extract the metric value
-                    if KPI_VALUE is not None:
-                        LOGGER.info("Extracted value of {:} is {:}".format(KPI, KPI_VALUE))
-                        print(f"Extracted value of {KPI} is: {KPI_VALUE}")
-                        return KPI_VALUE
-            else:
-                LOGGER.info("Failed to fetch metrics. Status code: {:}".format(response.status_code))
-                # print(f"Failed to fetch metrics. Status code: {response.status_code}")
-                return None
-        except Exception as e:
-            LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e))
-            # print(f"Failed to fetch metrics: {str(e)}")
-            return None
-
-    @staticmethod
-    def extract_metric_value(metrics, metric_name):
-        """
-        Method to extract the value of a metric from the metrics string.
-        Args:
-            metrics (str): Metrics string fetched from Exporter.
-            metric_name (str): Name of the metric to extract.
-        Returns:
-            float: Value of the extracted metric, or None if not found.
-        """
-        try:
-            # Find the metric line containing the desired metric name
-            metric_line = next(line for line in metrics.split('\n') if line.startswith(metric_name))
-            # Split the line to extract the metric value
-            metric_value = float(metric_line.split()[1])
-            return metric_value
-        except StopIteration:
-            print(f"Metric '{metric_name}' not found in the metrics.")
-            return None
-
-    @staticmethod
-    def stream_node_export_metrics_to_raw_topic():
-        try:
-            while True:
-                response = requests.get(EXPORTER_ENDPOINT)
-                # print("Response Status {:} ".format(response))
-                # LOGGER.info("Response Status {:} ".format(response))
-                try: 
-                    if response.status_code == 200:
-                        producerObj = KafkaProducer(PRODUCER_CONFIG)
-                        producerObj.produce(KAFKA_TOPICS['raw'], key="raw", value= str(response.text), callback=TelemetryBackendService.delivery_callback)
-                        producerObj.flush()
-                        LOGGER.info("Produce to topic")
-                    else:
-                        LOGGER.info("Didn't received expected response. Status code: {:}".format(response.status_code))
-                        print(f"Didn't received expected response. Status code: {response.status_code}")
-                        return None
-                    time.sleep(15)
-                except Exception as e:
-                    LOGGER.info("Failed to process response. Status code: {:}".format(e))
-                    return None
-        except Exception as e:
-            LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e))
-            print(f"Failed to fetch metrics: {str(e)}")
-            return None
-# ----------- ABOVE: Actual Implementation of Kafka Producer with Node Exporter -----------
\ No newline at end of file
+        Callback function to handle message delivery status.
+        Args: err (KafkaError): Kafka error object.
+              msg (Message): Kafka message object.
+        """
+        if err: print(f'Message delivery failed: {err}')
+        # else:   print(f'Message delivered to topic {msg.topic()}')
+
+# # ----------- BELOW: Actual Implementation of Kafka Producer with Node Exporter -----------
+#     @staticmethod
+#     def fetch_single_node_exporter_metric():
+#         """
+#         Method to fetch metrics from Node Exporter.
+#         Returns:
+#             str: Metrics fetched from Node Exporter.
+#         """
+#         KPI = "node_network_receive_packets_total"
+#         try:
+#             response = requests.get(EXPORTER_ENDPOINT) # type: ignore
+#             LOGGER.info("Request status {:}".format(response))
+#             if response.status_code == 200:
+#                 # print(f"Metrics fetched sucessfully...")
+#                 metrics = response.text
+#                 # Check if the desired metric is available in the response
+#                 if KPI in metrics:
+#                     KPI_VALUE = TelemetryBackendService.extract_metric_value(metrics, KPI)
+#                     # Extract the metric value
+#                     if KPI_VALUE is not None:
+#                         LOGGER.info("Extracted value of {:} is {:}".format(KPI, KPI_VALUE))
+#                         print(f"Extracted value of {KPI} is: {KPI_VALUE}")
+#                         return KPI_VALUE
+#             else:
+#                 LOGGER.info("Failed to fetch metrics. Status code: {:}".format(response.status_code))
+#                 # print(f"Failed to fetch metrics. Status code: {response.status_code}")
+#                 return None
+#         except Exception as e:
+#             LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e))
+#             # print(f"Failed to fetch metrics: {str(e)}")
+#             return None
+
+#     @staticmethod
+#     def extract_metric_value(metrics, metric_name):
+#         """
+#         Method to extract the value of a metric from the metrics string.
+#         Args:
+#             metrics (str): Metrics string fetched from Exporter.
+#             metric_name (str): Name of the metric to extract.
+#         Returns:
+#             float: Value of the extracted metric, or None if not found.
+#         """
+#         try:
+#             # Find the metric line containing the desired metric name
+#             metric_line = next(line for line in metrics.split('\n') if line.startswith(metric_name))
+#             # Split the line to extract the metric value
+#             metric_value = float(metric_line.split()[1])
+#             return metric_value
+#         except StopIteration:
+#             print(f"Metric '{metric_name}' not found in the metrics.")
+#             return None
+
+#     @staticmethod
+#     def stream_node_export_metrics_to_raw_topic():
+#         try:
+#             while True:
+#                 response = requests.get(EXPORTER_ENDPOINT)
+#                 # print("Response Status {:} ".format(response))
+#                 # LOGGER.info("Response Status {:} ".format(response))
+#                 try: 
+#                     if response.status_code == 200:
+#                         producerObj = KafkaProducer(PRODUCER_CONFIG)
+#                         producerObj.produce(KAFKA_TOPICS['raw'], key="raw", value= str(response.text), callback=TelemetryBackendService.delivery_callback)
+#                         producerObj.flush()
+#                         LOGGER.info("Produce to topic")
+#                     else:
+#                         LOGGER.info("Didn't received expected response. Status code: {:}".format(response.status_code))
+#                         print(f"Didn't received expected response. Status code: {response.status_code}")
+#                         return None
+#                     time.sleep(15)
+#                 except Exception as e:
+#                     LOGGER.info("Failed to process response. Status code: {:}".format(e))
+#                     return None
+#         except Exception as e:
+#             LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e))
+#             print(f"Failed to fetch metrics: {str(e)}")
+#             return None
+# # ----------- ABOVE: Actual Implementation of Kafka Producer with Node Exporter -----------
\ No newline at end of file
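For context, `RequestListener` above implies the following wire format on the REQUEST topic: a JSON-encoded collector dictionary, keyed by the collector UUID, where a `duration` and `interval` of -1 request termination. Below is a hedged sketch of a frontend-side producer; the identifiers and durations are purely illustrative.

```python
# Sketch only: publish a collector request in the shape RequestListener() expects.
import json
from confluent_kafka import Producer
from common.tools.kafka.Variables import KafkaConfig, KafkaTopic

collector_id = 'collector-uuid-1234'                                       # hypothetical identifier
collector    = {'kpi_id': 'kpi-uuid-5678', 'duration': 30, 'interval': 5}  # seconds

producer = Producer({'bootstrap.servers': KafkaConfig.get_kafka_address()})
producer.produce(KafkaTopic.REQUEST.value, key=collector_id, value=json.dumps(collector))
producer.flush()

# Sending duration == -1 and interval == -1 for the same key asks the backend to stop that collector.
```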
diff --git a/src/telemetry/backend/service/__main__.py b/src/telemetry/backend/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ec9e191fd22e07da46f80214ade0ac516032433
--- /dev/null
+++ b/src/telemetry/backend/service/__main__.py
@@ -0,0 +1,56 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from prometheus_client import start_http_server
+from common.Settings import get_log_level, get_metrics_port
+from .TelemetryBackendService import TelemetryBackendService
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
+
+    grpc_service = TelemetryBackendService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.info('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/telemetry/backend/tests/testTelemetryBackend.py b/src/telemetry/backend/tests/testTelemetryBackend.py
deleted file mode 100644
index d832e54e77589ca677682760d19e68b1bd09b1f7..0000000000000000000000000000000000000000
--- a/src/telemetry/backend/tests/testTelemetryBackend.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-print (sys.path)
-sys.path.append('/home/tfs/tfs-ctrl')
-import threading
-import logging
-from typing import Tuple
-# from common.proto.context_pb2 import Empty
-from src.telemetry.backend.service.TelemetryBackendService import TelemetryBackendService
-
-LOGGER = logging.getLogger(__name__)
-
-
-###########################
-# Tests Implementation of Telemetry Backend
-###########################
-
-def test_verify_kafka_topics():
-    LOGGER.info('test_verify_kafka_topics requesting')
-    TelemetryBackendServiceObj = TelemetryBackendService()
-    KafkaTopics = ['topic_request', 'topic_response', 'topic_raw', 'topic_labled']
-    response = TelemetryBackendServiceObj.create_topic_if_not_exists(KafkaTopics)
-    LOGGER.debug(str(response))
-    assert isinstance(response, bool)
-
-# def test_run_kafka_listener():
-#     LOGGER.info('test_receive_kafka_request requesting')
-#     TelemetryBackendServiceObj = TelemetryBackendService()
-#     response = TelemetryBackendServiceObj.run_kafka_listener()
-#     LOGGER.debug(str(response))
-#     assert isinstance(response, bool)
-
-# def test_fetch_node_exporter_metrics():
-#     LOGGER.info(' >>> test_fetch_node_exporter_metrics START <<< ')
-#     TelemetryBackendService.fetch_single_node_exporter_metric()
-
-def test_stream_node_export_metrics_to_raw_topic():
-    LOGGER.info(' >>> test_stream_node_export_metrics_to_raw_topic START <<< ')
-    threading.Thread(target=TelemetryBackendService.stream_node_export_metrics_to_raw_topic, args=()).start()
-
diff --git a/src/telemetry/backend/tests/test_TelemetryBackend.py b/src/telemetry/backend/tests/test_TelemetryBackend.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2bbee540c3ce348ef52eceb0e776f48a68d94b1
--- /dev/null
+++ b/src/telemetry/backend/tests/test_TelemetryBackend.py
@@ -0,0 +1,38 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.tools.kafka.Variables import KafkaTopic
+from telemetry.backend.service.TelemetryBackendService import TelemetryBackendService
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+###########################
+# Tests Implementation of Telemetry Backend
+###########################
+
+# --- "test_validate_kafka_topics" should be run before the functionality tests ---
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+def test_RunRequestListener():
+    LOGGER.info('test_RunRequestListener')
+    TelemetryBackendServiceObj = TelemetryBackendService()
+    response = TelemetryBackendServiceObj.RunRequestListener()
+    LOGGER.debug(str(response))
+    assert isinstance(response, bool)
diff --git a/src/telemetry/database/TelemetryDBmanager.py b/src/telemetry/database/TelemetryDBmanager.py
deleted file mode 100644
index b558180a9e1fbf85bf523c7faededf58f57e2264..0000000000000000000000000000000000000000
--- a/src/telemetry/database/TelemetryDBmanager.py
+++ /dev/null
@@ -1,248 +0,0 @@
-# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, time
-import sqlalchemy
-from sqlalchemy import inspect, MetaData, Table
-from sqlalchemy.orm import sessionmaker
-from telemetry.database.TelemetryModel import Collector as CollectorModel
-from telemetry.database.TelemetryModel import Kpi as KpiModel
-from sqlalchemy.ext.declarative import declarative_base
-from telemetry.database.TelemetryEngine import TelemetryEngine
-from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId
-from common.proto.telemetry_frontend_pb2 import Collector, CollectorId
-from sqlalchemy.exc import SQLAlchemyError
-from telemetry.database.TelemetryModel import Base
-
-LOGGER = logging.getLogger(__name__)
-DB_NAME = "telemetryfrontend"
-
-class TelemetryDBmanager:
-    def __init__(self):
-        self.db_engine = TelemetryEngine.get_engine()
-        if self.db_engine is None:
-            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
-            return False
-        self.db_name = DB_NAME
-        self.Session = sessionmaker(bind=self.db_engine)
-
-    def create_database(self):
-        try:
-            # with self.db_engine.connect() as connection:
-            #     connection.execute(f"CREATE DATABASE {self.db_name};")
-            TelemetryEngine.create_database(self.db_engine)
-            LOGGER.info('TelemetryDBmanager initalized DB Name: {:}'.format(self.db_name))
-            return True
-        except Exception as e: # pylint: disable=bare-except # pragma: no cover
-            LOGGER.exception('Failed to check/create the database: {:s}'.format(str(e)))
-            return False
-
-    def create_tables(self):
-        try:
-            Base.metadata.create_all(self.db_engine)     # type: ignore
-            LOGGER.info("Tables created in database ({:}) the as per Models".format(self.db_name))
-        except Exception as e:
-            LOGGER.info("Tables cannot be created in the TelemetryFrontend database. {:s}".format(str(e)))
-
-    def verify_tables(self):
-        try:
-            with self.db_engine.connect() as connection:
-                result = connection.execute("SHOW TABLES;")
-                tables = result.fetchall()
-                LOGGER.info("Tables in DB: {:}".format(tables))
-        except Exception as e:
-            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
-
-    def drop_table(self, table_to_drop: str):
-        try:
-            inspector = inspect(self.db_engine)
-            existing_tables = inspector.get_table_names()
-            if table_to_drop in existing_tables:
-                table = Table(table_to_drop, MetaData(), autoload_with=self.db_engine)
-                table.drop(self.db_engine)
-                LOGGER.info("Tables delete in the DB Name: {:}".format(self.db_name))
-            else:
-                LOGGER.warning("No table {:} in database {:} ".format(table_to_drop, DB_NAME))
-        except Exception as e:
-            LOGGER.info("Tables cannot be deleted in the {:} database. {:s}".format(DB_NAME, str(e)))
-
-    def list_databases(self):
-        query = "SHOW DATABASES"
-        with self.db_engine.connect() as connection:
-            result = connection.execute(query)
-            databases = [row[0] for row in result]
-        LOGGER.info("List of available DBs: {:}".format(databases))
-        
-# ------------------ INSERT METHODs --------------------------------------
-
-    def inser_kpi(self, request: KpiDescriptor):
-        session = self.Session()
-        try:
-            # Create a new Kpi instance
-            kpi_to_insert                 = KpiModel()
-            kpi_to_insert.kpi_id          = request.kpi_id.kpi_id.uuid
-            kpi_to_insert.kpi_description = request.kpi_description
-            kpi_to_insert.kpi_sample_type = request.kpi_sample_type
-            kpi_to_insert.device_id       = request.service_id.service_uuid.uuid 
-            kpi_to_insert.endpoint_id     = request.device_id.device_uuid.uuid 
-            kpi_to_insert.service_id      = request.slice_id.slice_uuid.uuid 
-            kpi_to_insert.slice_id        = request.endpoint_id.endpoint_uuid.uuid
-            kpi_to_insert.connection_id   = request.connection_id.connection_uuid.uuid
-            # kpi_to_insert.link_id         = request.link_id.link_id.uuid
-            # Add the instance to the session
-            session.add(kpi_to_insert)
-            session.commit()
-            LOGGER.info("Row inserted into kpi table: {:}".format(kpi_to_insert.kpi_id))
-        except Exception as e:
-            session.rollback()
-            LOGGER.info("Failed to insert new kpi. {:s}".format(str(e)))
-        finally:
-            # Close the session
-            session.close()
-
-    # Function to insert a row into the Collector model
-    def insert_collector(self, request: Collector):
-        session = self.Session()
-        try:
-            # Create a new Collector instance
-            collector_to_insert                     = CollectorModel()
-            collector_to_insert.collector_id        = request.collector_id.collector_id.uuid
-            collector_to_insert.kpi_id              = request.kpi_id.kpi_id.uuid  
-            collector_to_insert.collector           = "Test collector description"
-            collector_to_insert.sampling_duration_s = request.duration_s
-            collector_to_insert.sampling_interval_s = request.interval_s
-            collector_to_insert.start_timestamp     = time.time()
-            collector_to_insert.end_timestamp       = time.time()
-            
-            session.add(collector_to_insert)
-            session.commit()
-            LOGGER.info("Row inserted into collector table: {:}".format(collector_to_insert.collector_id))
-        except Exception as e:
-            session.rollback()
-            LOGGER.info("Failed to insert new collector. {:s}".format(str(e)))
-        finally:
-            # Close the session
-            session.close()
-
-# ------------------ GET METHODs --------------------------------------
-
-    def get_kpi_descriptor(self, request: KpiId):
-        session = self.Session()
-        try:
-            kpi_id_to_search = request.kpi_id.uuid
-            kpi = session.query(KpiModel).filter_by(kpi_id=kpi_id_to_search).first()
-            if kpi:
-                LOGGER.info("kpi ID found: {:s}".format(str(kpi)))
-                return kpi
-            else:
-                LOGGER.warning("Kpi ID not found {:s}".format(str(kpi_id_to_search)))
-                return None
-        except Exception as e:
-            session.rollback()
-            LOGGER.info("Failed to retrieve KPI ID. {:s}".format(str(e)))
-            raise
-        finally:
-            session.close()
-
-    def get_collector(self, request: CollectorId):
-        session = self.Session()
-        try:
-            collector_id_to_search = request.collector_id.uuid
-            collector = session.query(CollectorModel).filter_by(collector_id=collector_id_to_search).first()
-            if collector:
-                LOGGER.info("collector ID found: {:s}".format(str(collector)))
-                return collector
-            else:
-                LOGGER.warning("collector ID not found{:s}".format(str(collector_id_to_search)))
-                return None
-        except Exception as e:
-            session.rollback()
-            LOGGER.info("Failed to retrieve collector ID. {:s}".format(str(e)))
-            raise
-        finally:
-            session.close()
-    
-    # ------------------ SELECT METHODs --------------------------------------
-
-    def select_kpi_descriptor(self, **filters):
-        session = self.Session()
-        try:
-            query = session.query(KpiModel)
-            for column, value in filters.items():
-                query = query.filter(getattr(KpiModel, column) == value)
-            result = query.all()
-            if len(result) != 0:
-                LOGGER.info("Fetched filtered rows from KPI table with filters : {:s}".format(str(result)))
-            else:
-                LOGGER.warning("No matching row found : {:s}".format(str(result)))
-            return result
-        except SQLAlchemyError as e:
-            LOGGER.error("Error fetching filtered rows from KPI table with filters {:}: {:}".format(filters, e))
-            return []
-        finally:
-            session.close()
-    
-    def select_collector(self, **filters):
-        session = self.Session()
-        try:
-            query = session.query(CollectorModel)
-            for column, value in filters.items():
-                query = query.filter(getattr(CollectorModel, column) == value)
-            result = query.all()
-            if len(result) != 0:
-                LOGGER.info("Fetched filtered rows from KPI table with filters : {:s}".format(str(result)))
-            else:
-                LOGGER.warning("No matching row found : {:s}".format(str(result)))            
-            return result
-        except SQLAlchemyError as e:
-            LOGGER.error("Error fetching filtered rows from KPI table with filters {:}: {:}".format(filters, e))
-            return []
-        finally:
-            session.close()
-
-# ------------------ DELETE METHODs --------------------------------------
-
-    def delete_kpi_descriptor(self, request: KpiId):
-        session = self.Session()
-        try:
-            kpi_id_to_delete = request.kpi_id.uuid
-            kpi = session.query(KpiModel).filter_by(kpi_id=kpi_id_to_delete).first()
-            if kpi:
-                session.delete(kpi)
-                session.commit()
-                LOGGER.info("Deleted KPI with kpi_id: %s", kpi_id_to_delete)
-            else:
-                LOGGER.warning("KPI with kpi_id %s not found", kpi_id_to_delete)
-        except SQLAlchemyError as e:
-            session.rollback()
-            LOGGER.error("Error deleting KPI with kpi_id %s: %s", kpi_id_to_delete, e)
-        finally:
-            session.close()
-
-    def delete_collector(self, request: CollectorId):
-        session = self.Session()
-        try:
-            collector_id_to_delete = request.collector_id.uuid
-            collector = session.query(CollectorModel).filter_by(collector_id=collector_id_to_delete).first()
-            if collector:
-                session.delete(collector)
-                session.commit()
-                LOGGER.info("Deleted collector with collector_id: %s", collector_id_to_delete)
-            else:
-                LOGGER.warning("collector with collector_id %s not found", collector_id_to_delete)
-        except SQLAlchemyError as e:
-            session.rollback()
-            LOGGER.error("Error deleting collector with collector_id %s: %s", collector_id_to_delete, e)
-        finally:
-            session.close()
\ No newline at end of file
diff --git a/src/telemetry/database/TelemetryEngine.py b/src/telemetry/database/TelemetryEngine.py
index a563fa09f94c812aed07d0aa3cbd5bc988737fc4..7c8620faf25e695e7f971bce78be9ad208a7701b 100644
--- a/src/telemetry/database/TelemetryEngine.py
+++ b/src/telemetry/database/TelemetryEngine.py
@@ -12,48 +12,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, sqlalchemy, sqlalchemy_utils
-# from common.Settings import get_setting
+import logging, sqlalchemy
+from common.Settings import get_setting
 
 LOGGER = logging.getLogger(__name__)
-
-APP_NAME = 'tfs'
-ECHO = False                # False: No dump SQL commands and transactions executed
-CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}'
-# CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
+CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
 
 class TelemetryEngine:
-    # def __init__(self):
-    #     self.engine = self.get_engine()
     @staticmethod
     def get_engine() -> sqlalchemy.engine.Engine:
-        CRDB_NAMESPACE = "crdb"
-        CRDB_SQL_PORT  = "26257"
-        CRDB_DATABASE  = "telemetryfrontend"
-        CRDB_USERNAME  = "tfs"
-        CRDB_PASSWORD  = "tfs123"
-        CRDB_SSLMODE   = "require"
-        crdb_uri = CRDB_URI_TEMPLATE.format(
-                CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
-        # crdb_uri = CRDB_URI_TEMPLATE.format(
-        #         CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
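+        # Prefer a full CRDB_URI setting; otherwise compose the URI from the individual CockroachDB settings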
+        crdb_uri = get_setting('CRDB_URI', default=None)
+        if crdb_uri is None:
+            CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE')
+            CRDB_SQL_PORT  = get_setting('CRDB_SQL_PORT')
+            CRDB_DATABASE  = "tfs-telemetry"             # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT')
+            CRDB_USERNAME  = get_setting('CRDB_USERNAME')
+            CRDB_PASSWORD  = get_setting('CRDB_PASSWORD')
+            CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
+            crdb_uri = CRDB_URI_TEMPLATE.format(
+                CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
         try:
-            # engine = sqlalchemy.create_engine(
-            #     crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True)
             engine = sqlalchemy.create_engine(crdb_uri, echo=False)
-            LOGGER.info(' TelemetryDBmanager initalized with DB URL: {:}'.format(crdb_uri))
+            LOGGER.info('TelemetryDB initialized with DB URL: {:}'.format(crdb_uri))
         except: # pylint: disable=bare-except # pragma: no cover
             LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
             return None # type: ignore
-        return engine # type: ignore
-
-    @staticmethod
-    def create_database(engine : sqlalchemy.engine.Engine) -> None:
-        if not sqlalchemy_utils.database_exists(engine.url):
-            LOGGER.info("Database created. {:}".format(engine.url))
-            sqlalchemy_utils.create_database(engine.url)
-
-    @staticmethod
-    def drop_database(engine : sqlalchemy.engine.Engine) -> None:
-        if sqlalchemy_utils.database_exists(engine.url):
-            sqlalchemy_utils.drop_database(engine.url)
+        return engine
diff --git a/src/telemetry/database/TelemetryModel.py b/src/telemetry/database/TelemetryModel.py
index be4f0969c86638520cf226b8e42db90426165804..4e71ce8138af39e51c80791dbd6683d855231d7b 100644
--- a/src/telemetry/database/TelemetryModel.py
+++ b/src/telemetry/database/TelemetryModel.py
@@ -14,32 +14,60 @@
 
 import logging
 from sqlalchemy.dialects.postgresql import UUID
-from sqlalchemy import Column, Integer, String, Float, Text, ForeignKey
-from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy.orm import sessionmaker, relationship
+from sqlalchemy import Column, String, Float
 from sqlalchemy.orm import registry
+from common.proto import telemetry_frontend_pb2
 
 logging.basicConfig(level=logging.INFO)
 LOGGER = logging.getLogger(__name__)
 
 # Create a base class for declarative models
 Base = registry().generate_base()
-# Base = declarative_base()
     
 class Collector(Base):
     __tablename__ = 'collector'
 
     collector_id         = Column(UUID(as_uuid=False), primary_key=True)
-    kpi_id               = Column(UUID(as_uuid=False))
-    collector_decription = Column(String)
-    sampling_duration_s  = Column(Float)
-    sampling_interval_s  = Column(Float)
-    start_timestamp      = Column(Float)
-    end_timestamp        = Column(Float)
-
+    kpi_id               = Column(UUID(as_uuid=False), nullable=False)
+    sampling_duration_s  = Column(Float              , nullable=False)
+    sampling_interval_s  = Column(Float              , nullable=False)
+    start_timestamp      = Column(Float              , nullable=False)
+    end_timestamp        = Column(Float              , nullable=False)
 
+    # human-readable representation, useful when logging Collector rows
     def __repr__(self):
-        return (f"<Collector(collector_id='{self.collector_id}', kpi_id='{self.kpi_id}', "
-                f"collector='{self.collector_decription}', sampling_duration_s='{self.sampling_duration_s}', "
-                f"sampling_interval_s='{self.sampling_interval_s}', start_timestamp='{self.start_timestamp}', "
-                f"end_timestamp='{self.end_timestamp}')>")
\ No newline at end of file
+        return (f"<Collector(collector_id='{self.collector_id}'   , kpi_id='{self.kpi_id}', "
+                f"sampling_duration_s='{self.sampling_duration_s}', sampling_interval_s='{self.sampling_interval_s}',"
+                f"start_timestamp='{self.start_timestamp}'        , end_timestamp='{self.end_timestamp}')>")
+
+    @classmethod
+    def ConvertCollectorToRow(cls, request):
+        """
+        Create a Collector table row (model instance) from a collector gRPC request.
+        Args:    request: The request object containing collector gRPC message.
+        Returns: A row (an instance of Collector table) initialized with content of the request.
+        """
+        return cls(
+            collector_id         = request.collector_id.collector_id.uuid,
+            kpi_id               = request.kpi_id.kpi_id.uuid,
+            sampling_duration_s  = request.duration_s,
+            sampling_interval_s  = request.interval_s,
+            start_timestamp      = request.start_time.timestamp,
+            end_timestamp        = request.end_time.timestamp
+        )
+
+    @classmethod
+    def ConvertRowToCollector(cls, row):
+        """
+        Create and return a collector gRPC message from a Collector table instance (row).
+        Args:   row: The Collector table instance (row) containing the data.
+        Returns: collector gRPC message initialized with the content of a row.
+        """
+        response                                = telemetry_frontend_pb2.Collector()
+        response.collector_id.collector_id.uuid = row.collector_id
+        response.kpi_id.kpi_id.uuid             = row.kpi_id
+        response.duration_s                     = row.sampling_duration_s
+        response.interval_s                     = row.sampling_interval_s
+        response.start_time.timestamp           = row.start_timestamp
+        response.end_time.timestamp             = row.end_timestamp
+        return response
diff --git a/src/telemetry/database/Telemetry_DB.py b/src/telemetry/database/Telemetry_DB.py
new file mode 100644
index 0000000000000000000000000000000000000000..32acfd73a410a7bfddd6b487d0b1962afadb3842
--- /dev/null
+++ b/src/telemetry/database/Telemetry_DB.py
@@ -0,0 +1,137 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sqlalchemy_utils
+from sqlalchemy import inspect
+from sqlalchemy.orm import sessionmaker
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+from telemetry.database.TelemetryEngine import TelemetryEngine
+from common.method_wrappers.ServiceExceptions import (
+    OperationFailedException, AlreadyExistsException )
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "tfs_telemetry"
+
+class TelemetryDB:
+    def __init__(self):
+        self.db_engine = TelemetryEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            raise OperationFailedException('TelemetryDB initialization', extra_details=['Unable to get SQLAlchemy DB Engine'])
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
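+    # Create the database (if it does not exist) pointed to by the engine URL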
+    def create_database(self):
+        if not sqlalchemy_utils.database_exists(self.db_engine.url):
+            sqlalchemy_utils.create_database(self.db_engine.url)
+            LOGGER.debug("Database created: {:}".format(self.db_engine.url))
+
+    def drop_database(self) -> None:
+        if sqlalchemy_utils.database_exists(self.db_engine.url):
+            sqlalchemy_utils.drop_database(self.db_engine.url)
+
+    def create_tables(self):
+        try:
+            CollectorModel.metadata.create_all(self.db_engine)     # type: ignore
+            LOGGER.debug("Tables created in the database: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.debug("Tables cannot be created in the database. {:s}".format(str(e)))
+            raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)])
+
+    def verify_tables(self):
+        try:
+            inspect_object = inspect(self.db_engine)
+            if inspect_object.has_table('collector', None):
+                LOGGER.info("Table 'collector' exists in DB: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
+
+# ----------------- CRUD METHODs ---------------------
+
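+    # Insert a single model row; a duplicate primary key raises AlreadyExistsException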
+    def add_row_to_db(self, row):
+        session = self.Session()
+        try:
+            session.add(row)
+            session.commit()
+            LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.")
+            return True
+        except Exception as e:
+            session.rollback()
+            if "psycopg2.errors.UniqueViolation" in str(e):
+                LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}")
+                raise AlreadyExistsException(row.__class__.__name__, row,
+                                             extra_details=["Unique key voilation: {:}".format(e)] )
+            else:
+                LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
+                raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
+        finally:
+            session.close()
+    
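+    # Fetch the first row of 'model' whose column 'col_name' matches 'id_to_search' (None if not found)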
+    def search_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if entity:
+                # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}")
+                return entity
+            else:
+                LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}")
+                print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search))
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}")
+            raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)])
+        finally:
+            session.close()
+    
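+    # Delete the row of 'model' whose column 'col_name' matches 'id_to_search', if it exists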
+    def delete_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            record = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if record:
+                session.delete(record)
+                session.commit()
+                LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search)
+            else:
+                LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search)
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e)
+            raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
+        finally:
+            session.close()
+    
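+    # Collector-specific selection: apply the kpi_id filter from 'filter_object' and return the matching rows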
+    def select_with_filter(self, model, filter_object):
+        session = self.Session()
+        try:
+            query = session.query(CollectorModel)
+            # Apply filters based on the filter_object
+            if filter_object.kpi_id:
+                query = query.filter(CollectorModel.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id]))     
+            result = query.all()
+            # Note: when filter_object.kpi_id is empty, the query returns all Collector rows
+            if result:
+                LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") #  - Results: {result}
+            else:
+                LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filter_object}")
+            return result
+        except Exception as e:
+            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}")
+            raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)])
+        finally:
+            session.close()
+
diff --git a/src/telemetry/database/managementDB.py b/src/telemetry/database/managementDB.py
deleted file mode 100644
index f79126f279d7bbece6c08ae5eb1cd74e340d1c7d..0000000000000000000000000000000000000000
--- a/src/telemetry/database/managementDB.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, time
-import sqlalchemy
-import sqlalchemy_utils
-from sqlalchemy.orm import sessionmaker
-from sqlalchemy.ext.declarative import declarative_base
-from telemetry.database.TelemetryEngine import TelemetryEngine
-from telemetry.database.TelemetryModel import Base
-
-LOGGER = logging.getLogger(__name__)
-DB_NAME = "telemetryfrontend"
-
-# # Create a base class for declarative models
-# Base = declarative_base()
-
-class managementDB:
-    def __init__(self):
-        self.db_engine = TelemetryEngine.get_engine()
-        if self.db_engine is None:
-            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
-            return False
-        self.db_name = DB_NAME
-        self.Session = sessionmaker(bind=self.db_engine)
-
-    @staticmethod
-    def create_database(engine : sqlalchemy.engine.Engine) -> None:
-        if not sqlalchemy_utils.database_exists(engine.url):
-            LOGGER.info("Database created. {:}".format(engine.url))
-            sqlalchemy_utils.create_database(engine.url)
-
-    @staticmethod
-    def drop_database(engine : sqlalchemy.engine.Engine) -> None:
-        if sqlalchemy_utils.database_exists(engine.url):
-            sqlalchemy_utils.drop_database(engine.url)
-
-    # def create_database(self):
-    #     try:
-    #         with self.db_engine.connect() as connection:
-    #             connection.execute(f"CREATE DATABASE {self.db_name};")
-    #         LOGGER.info('managementDB initalizes database. Name: {self.db_name}')
-    #         return True
-    #     except: 
-    #         LOGGER.exception('Failed to check/create the database: {:s}'.format(str(self.db_engine.url)))
-    #         return False
-    
-    @staticmethod
-    def create_tables(engine : sqlalchemy.engine.Engine):
-        try:
-            Base.metadata.create_all(engine)     # type: ignore
-            LOGGER.info("Tables created in the DB Name: {:}".format(DB_NAME))
-        except Exception as e:
-            LOGGER.info("Tables cannot be created in the TelemetryFrontend database. {:s}".format(str(e)))
-
-    def verify_tables(self):
-        try:
-            with self.db_engine.connect() as connection:
-                result = connection.execute("SHOW TABLES;")
-                tables = result.fetchall()      # type: ignore
-                LOGGER.info("Tables verified: {:}".format(tables))
-        except Exception as e:
-            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
-
-    @staticmethod
-    def add_row_to_db(self, row):
-        session = self.Session()
-        try:
-            session.add(row)
-            session.commit()
-            LOGGER.info(f"Row inserted into {row.__class__.__name__} table.")
-        except Exception as e:
-            session.rollback()
-            LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
-        finally:
-            session.close()
-    
-    def search_db_row_by_id(self, model, col_name, id_to_search):
-        session = self.Session()
-        try:
-            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
-            if entity:
-                LOGGER.info(f"{model.__name__} ID found: {str(entity)}")
-                return entity
-            else:
-                LOGGER.warning(f"{model.__name__} ID not found: {str(id_to_search)}")
-                return None
-        except Exception as e:
-            session.rollback()
-            LOGGER.info(f"Failed to retrieve {model.__name__} ID. {str(e)}")
-            raise
-        finally:
-            session.close()
-    
-    def delete_db_row_by_id(self, model, col_name, id_to_search):
-        session = self.Session()
-        try:
-            record = session.query(model).filter_by(**{col_name: id_to_search}).first()
-            if record:
-                session.delete(record)
-                session.commit()
-                LOGGER.info("Deleted %s with %s: %s", model.__name__, col_name, id_to_search)
-            else:
-                LOGGER.warning("%s with %s %s not found", model.__name__, col_name, id_to_search)
-        except Exception as e:
-            session.rollback()
-            LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e)
-        finally:
-            session.close()
-    
-    def select_with_filter(self, model, **filters):
-        session = self.Session()
-        try:
-            query = session.query(model)
-            for column, value in filters.items():
-                query = query.filter(getattr(model, column) == value) # type: ignore   
-            result = query.all()
-            if result:
-                LOGGER.info(f"Fetched filtered rows from {model.__name__} table with filters: {filters}") #  - Results: {result}
-            else:
-                LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filters}")
-            return result
-        except Exception as e:
-            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filters} ::: {e}")
-            return []
-        finally:
-            session.close()
\ No newline at end of file
diff --git a/src/telemetry/database/tests/telemetryDBtests.py b/src/telemetry/database/tests/telemetryDBtests.py
deleted file mode 100644
index 0d221106419d6e4ee4b313adf10c90c5e6be7666..0000000000000000000000000000000000000000
--- a/src/telemetry/database/tests/telemetryDBtests.py
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import Any
-from sqlalchemy.ext.declarative import declarative_base
-from telemetry.database.TelemetryDBmanager import TelemetryDBmanager
-from telemetry.database.TelemetryEngine import TelemetryEngine
-from telemetry.database.tests import temp_DB
-from .messages import create_kpi_request, create_collector_request, \
-                        create_kpi_id_request, create_kpi_filter_request, \
-                        create_collector_id_request, create_collector_filter_request
-
-logging.basicConfig(level=logging.INFO)
-LOGGER = logging.getLogger(__name__)
-
-
-# def test_temp_DB():
-#     temp_DB.main()
-
-def test_telemetry_object_creation():
-    LOGGER.info('--- test_telemetry_object_creation: START')
-
-    LOGGER.info('>>> Creating TelemetryDBmanager Object <<< ')
-    TelemetryDBmanagerObj = TelemetryDBmanager()
-    TelemetryEngine.create_database(TelemetryDBmanagerObj.db_engine)        # creates 'frontend' db, if it doesnot exists.
-
-    LOGGER.info('>>> Creating database <<< ')
-    TelemetryDBmanagerObj.create_database()
-
-    LOGGER.info('>>> verifing database <<< ')
-    TelemetryDBmanagerObj.list_databases()
-
-    # # LOGGER.info('>>> Droping Tables: ')
-    # # TelemetryDBmanagerObj.drop_table("table_naem_here")
-
-    LOGGER.info('>>> Creating Tables <<< ')
-    TelemetryDBmanagerObj.create_tables()
-
-    LOGGER.info('>>> Verifing Table creation <<< ')
-    TelemetryDBmanagerObj.verify_tables()
-
-    # LOGGER.info('>>> TESTING: Row Insertion Operation: kpi Table <<<')
-    # kpi_obj = create_kpi_request()
-    # TelemetryDBmanagerObj.inser_kpi(kpi_obj)
-
-    # LOGGER.info('>>> TESTING: Row Insertion Operation: collector Table <<<')
-    # collector_obj = create_collector_request()
-    # TelemetryDBmanagerObj.insert_collector(collector_obj)
-
-    # LOGGER.info('>>> TESTING: Get KpiDescriptor  <<<')
-    # kpi_id_obj = create_kpi_id_request()
-    # TelemetryDBmanagerObj.get_kpi_descriptor(kpi_id_obj)
-
-    # LOGGER.info('>>> TESTING: Select Collector  <<<')
-    # collector_id_obj = create_collector_id_request()
-    # TelemetryDBmanagerObj.get_collector(collector_id_obj)
-
-    # LOGGER.info('>>> TESTING: Applying kpi filter  <<< ')
-    # kpi_filter : dict[str, Any] = create_kpi_filter_request()
-    # TelemetryDBmanagerObj.select_kpi_descriptor(**kpi_filter)
-
-    # LOGGER.info('>>> TESTING: Applying collector filter   <<<')
-    # collector_filter : dict[str, Any] = create_collector_filter_request()
-    # TelemetryDBmanagerObj.select_collector(**collector_filter)
-    
-    # LOGGER.info('>>> TESTING: Delete KpiDescriptor ')
-    # kpi_id_obj = create_kpi_id_request()
-    # TelemetryDBmanagerObj.delete_kpi_descriptor(kpi_id_obj)
-
-    # LOGGER.info('>>> TESTING: Delete Collector ')
-    # collector_id_obj = create_collector_id_request()
-    # TelemetryDBmanagerObj.delete_collector(collector_id_obj)
-    
\ No newline at end of file
diff --git a/src/telemetry/database/tests/temp_DB.py b/src/telemetry/database/tests/temp_DB.py
deleted file mode 100644
index 089d3542492c2da87b839416f7118749bb82caad..0000000000000000000000000000000000000000
--- a/src/telemetry/database/tests/temp_DB.py
+++ /dev/null
@@ -1,327 +0,0 @@
-from sqlalchemy import create_engine, Column, String, Integer, Text, Float, ForeignKey
-from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy.orm import sessionmaker, relationship
-from sqlalchemy.dialects.postgresql import UUID
-import logging
-
-LOGGER = logging.getLogger(__name__)
-Base = declarative_base()
-
-class Kpi(Base):
-    __tablename__ = 'kpi'
-
-    kpi_id          = Column(UUID(as_uuid=False), primary_key=True)
-    kpi_description = Column(Text)
-    kpi_sample_type = Column(Integer)
-    device_id       = Column(String)
-    endpoint_id     = Column(String)
-    service_id      = Column(String)
-    slice_id        = Column(String)
-    connection_id   = Column(String)
-    link_id         = Column(String)
-
-    collectors = relationship('Collector', back_populates='kpi')
-
-    def __repr__(self):
-        return (f"<Kpi(kpi_id='{self.kpi_id}', kpi_description='{self.kpi_description}', "
-                f"kpi_sample_type='{self.kpi_sample_type}', device_id='{self.device_id}', "
-                f"endpoint_id='{self.endpoint_id}', service_id='{self.service_id}', "
-                f"slice_id='{self.slice_id}', connection_id='{self.connection_id}', "
-                f"link_id='{self.link_id}')>")
-    
-class Collector(Base):
-    __tablename__ = 'collector'
-
-    collector_id        = Column(UUID(as_uuid=False), primary_key=True)
-    kpi_id              = Column(UUID(as_uuid=False), ForeignKey('kpi.kpi_id'))
-    collector           = Column(String)
-    sampling_duration_s = Column(Float)
-    sampling_interval_s = Column(Float)
-    start_timestamp     = Column(Float)
-    end_timestamp       = Column(Float)
-
-    kpi = relationship('Kpi', back_populates='collectors')
-
-    def __repr__(self):
-        return (f"<Collector(collector_id='{self.collector_id}', kpi_id='{self.kpi_id}', "
-                f"collector='{self.collector}', sampling_duration_s='{self.sampling_duration_s}', "
-                f"sampling_interval_s='{self.sampling_interval_s}', start_timestamp='{self.start_timestamp}', "
-                f"end_timestamp='{self.end_timestamp}')>")
-
-class DatabaseManager:
-    def __init__(self, db_url, db_name):
-        self.engine = create_engine(db_url)
-        self.db_name = db_name
-        self.Session = sessionmaker(bind=self.engine)
-        LOGGER.info("DatabaseManager initialized with DB URL: %s and DB Name: %s", db_url, db_name)
-
-    def create_database(self):
-        try:
-            with self.engine.connect() as connection:
-                connection.execute(f"CREATE DATABASE {self.db_name};")
-            LOGGER.info("Database '%s' created successfully.", self.db_name)
-        except Exception as e:
-            LOGGER.error("Error creating database '%s': %s", self.db_name, e)
-        finally:
-            LOGGER.info("create_database method execution finished.")
-
-    def create_tables(self):
-        try:
-            Base.metadata.create_all(self.engine)
-            LOGGER.info("Tables created successfully.")
-        except Exception as e:
-            LOGGER.error("Error creating tables: %s", e)
-        finally:
-            LOGGER.info("create_tables method execution finished.")
-
-    def verify_table_creation(self):
-        try:
-            with self.engine.connect() as connection:
-                result = connection.execute("SHOW TABLES;")
-                tables = result.fetchall()
-                LOGGER.info("Tables verified: %s", tables)
-                return tables
-        except Exception as e:
-            LOGGER.error("Error verifying table creation: %s", e)
-            return []
-        finally:
-            LOGGER.info("verify_table_creation method execution finished.")
-
-    def insert_row_kpi(self, kpi_data):
-        session = self.Session()
-        try:
-            new_kpi = Kpi(**kpi_data)
-            session.add(new_kpi)
-            session.commit()
-            LOGGER.info("Inserted row into KPI table: %s", kpi_data)
-        except Exception as e:
-            session.rollback()
-            LOGGER.error("Error inserting row into KPI table: %s", e)
-        finally:
-            session.close()
-            LOGGER.info("insert_row_kpi method execution finished.")
-
-    def insert_row_collector(self, collector_data):
-        session = self.Session()
-        try:
-            new_collector = Collector(**collector_data)
-            session.add(new_collector)
-            session.commit()
-            LOGGER.info("Inserted row into Collector table: %s", collector_data)
-        except Exception as e:
-            session.rollback()
-            LOGGER.error("Error inserting row into Collector table: %s", e)
-        finally:
-            session.close()
-            LOGGER.info("insert_row_collector method execution finished.")
-
-    def verify_insertion_kpi(self, kpi_id):
-        session = self.Session()
-        try:
-            kpi = session.query(Kpi).filter_by(kpi_id=kpi_id).first()
-            LOGGER.info("Verified insertion in KPI table for kpi_id: %s, Result: %s", kpi_id, kpi)
-            return kpi
-        except Exception as e:
-            LOGGER.error("Error verifying insertion in KPI table for kpi_id %s: %s", kpi_id, e)
-            return None
-        finally:
-            session.close()
-            LOGGER.info("verify_insertion_kpi method execution finished.")
-
-    def verify_insertion_collector(self, collector_id):
-        session = self.Session()
-        try:
-            collector = session.query(Collector).filter_by(collector_id=collector_id).first()
-            LOGGER.info("Verified insertion in Collector table for collector_id: %s, Result: %s", collector_id, collector)
-            return collector
-        except Exception as e:
-            LOGGER.error("Error verifying insertion in Collector table for collector_id %s: %s", collector_id, e)
-            return None
-        finally:
-            session.close()
-            LOGGER.info("verify_insertion_collector method execution finished.")
-
-    def get_all_kpi_rows(self):
-        session = self.Session()
-        try:
-            kpi_rows = session.query(Kpi).all()
-            LOGGER.info("Fetched all rows from KPI table: %s", kpi_rows)
-            return kpi_rows
-        except Exception as e:
-            LOGGER.error("Error fetching all rows from KPI table: %s", e)
-            return []
-        finally:
-            session.close()
-            LOGGER.info("get_all_kpi_rows method execution finished.")
-
-    def get_all_collector_rows(self):
-        session = self.Session()
-        try:
-            collector_rows = session.query(Collector).all()
-            LOGGER.info("Fetched all rows from Collector table: %s", collector_rows)
-            return collector_rows
-        except Exception as e:
-            LOGGER.error("Error fetching all rows from Collector table: %s", e)
-            return []
-        finally:
-            session.close()
-            LOGGER.info("get_all_collector_rows method execution finished.")
-
-    def get_filtered_kpi_rows(self, **filters):
-        session = self.Session()
-        try:
-            query = session.query(Kpi)
-            for column, value in filters.items():
-                query = query.filter(getattr(Kpi, column) == value)
-            result = query.all()
-            LOGGER.info("Fetched filtered rows from KPI table with filters ---------- : {:s}".format(str(result)))
-            return result
-        except NoResultFound:
-            LOGGER.warning("No results found in KPI table with filters %s", filters)
-            return []
-        except Exception as e:
-            LOGGER.error("Error fetching filtered rows from KPI table with filters %s: %s", filters, e)
-            return []
-        finally:
-            session.close()
-            LOGGER.info("get_filtered_kpi_rows method execution finished.")
-
-    def get_filtered_collector_rows(self, **filters):
-        session = self.Session()
-        try:
-            query = session.query(Collector)
-            for column, value in filters.items():
-                query = query.filter(getattr(Collector, column) == value)
-            result = query.all()
-            LOGGER.info("Fetched filtered rows from Collector table with filters %s: %s", filters, result)
-            return result
-        except NoResultFound:
-            LOGGER.warning("No results found in Collector table with filters %s", filters)
-            return []
-        except Exception as e:
-            LOGGER.error("Error fetching filtered rows from Collector table with filters %s: %s", filters, e)
-            return []
-        finally:
-            session.close()
-            LOGGER.info("get_filtered_collector_rows method execution finished.")
-
-    def delete_kpi_by_id(self, kpi_id):
-        session = self.Session()
-        try:
-            kpi = session.query(Kpi).filter_by(kpi_id=kpi_id).first()
-            if kpi:
-                session.delete(kpi)
-                session.commit()
-                LOGGER.info("Deleted KPI with kpi_id: %s", kpi_id)
-            else:
-                LOGGER.warning("KPI with kpi_id %s not found", kpi_id)
-        except SQLAlchemyError as e:
-            session.rollback()
-            LOGGER.error("Error deleting KPI with kpi_id %s: %s", kpi_id, e)
-        finally:
-            session.close()
-            LOGGER.info("delete_kpi_by_id method execution finished.")
-
-    def delete_collector_by_id(self, collector_id):
-        session = self.Session()
-        try:
-            collector = session.query(Collector).filter_by(collector_id=collector_id).first()
-            if collector:
-                session.delete(collector)
-                session.commit()
-                LOGGER.info("Deleted Collector with collector_id: %s", collector_id)
-            else:
-                LOGGER.warning("Collector with collector_id %s not found", collector_id)
-        except SQLAlchemyError as e:
-            session.rollback()
-            LOGGER.error("Error deleting Collector with collector_id %s: %s", collector_id, e)
-        finally:
-            session.close()
-            LOGGER.info("delete_collector_by_id method execution finished.")
-
-
-# Example Usage
-def main():
-    CRDB_SQL_PORT  = "26257"
-    CRDB_DATABASE  = "telemetryfrontend"
-    CRDB_USERNAME  = "tfs"
-    CRDB_PASSWORD  = "tfs123"
-    CRDB_SSLMODE   = "require"    
-    CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}'
-    crdb_uri = CRDB_URI_TEMPLATE.format(
-            CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
-    # db_url = "cockroachdb://username:password@localhost:26257/"
-    # db_name = "yourdatabase"
-    db_manager = DatabaseManager(crdb_uri, CRDB_DATABASE)
-
-    # Create database
-    db_manager.create_database()
-
-    # Update db_url to include the new database name
-    db_manager.engine = create_engine(f"{crdb_uri}")
-    db_manager.Session = sessionmaker(bind=db_manager.engine)
-
-    # Create tables
-    db_manager.create_tables()
-
-    # Verify table creation
-    tables = db_manager.verify_table_creation()
-    LOGGER.info('Tables in the database: {:s}'.format(str(tables)))    
-
-    # Insert a row into the KPI table
-    kpi_data = {
-        'kpi_id': '123e4567-e89b-12d3-a456-426614174100',
-        'kpi_description': 'Sample KPI',
-        'kpi_sample_type': 1,
-        'device_id': 'device_1',
-        'endpoint_id': 'endpoint_1',
-        'service_id': 'service_1',
-        'slice_id': 'slice_1',
-        'connection_id': 'conn_1',
-        'link_id': 'link_1'
-    }
-    db_manager.insert_row_kpi(kpi_data)
-
-    # Insert a row into the Collector table
-    collector_data = {
-        'collector_id': '123e4567-e89b-12d3-a456-426614174101',
-        'kpi_id': '123e4567-e89b-12d3-a456-426614174000',
-        'collector': 'Collector 1',
-        'sampling_duration_s': 60.0,
-        'sampling_interval_s': 10.0,
-        'start_timestamp': 1625247600.0,
-        'end_timestamp': 1625247660.0
-    }
-    db_manager.insert_row_collector(collector_data)
-
-    # Verify insertion into KPI table
-    kpi = db_manager.verify_insertion_kpi('123e4567-e89b-12d3-a456-426614174000')
-    print("Inserted KPI:", kpi)
-
-    # Verify insertion into Collector table
-    collector = db_manager.verify_insertion_collector('123e4567-e89b-12d3-a456-426614174001')
-    print("Inserted Collector:", collector)
-
-    # Get all rows from KPI table
-    all_kpi_rows = db_manager.get_all_kpi_rows()
-    LOGGER.info("All KPI Rows: %s", all_kpi_rows)
-
-    # Get all rows from Collector table
-    all_collector_rows = db_manager.get_all_collector_rows()
-    LOGGER.info("All Collector Rows: %s", all_collector_rows)
-
-    # Get filtered rows from KPI table
-    filtered_kpi_rows = db_manager.get_filtered_kpi_rows(kpi_description='Sample KPI')
-    LOGGER.info("Filtered KPI Rows: %s", filtered_kpi_rows)
-
-    # Get filtered rows from Collector table
-    filtered_collector_rows = db_manager.get_filtered_collector_rows(collector='Collector 1')
-    LOGGER.info("Filtered Collector Rows: %s", filtered_collector_rows)
-
-    # Delete a KPI by kpi_id
-    kpi_id_to_delete = '123e4567-e89b-12d3-a456-426614174000'
-    db_manager.delete_kpi_by_id(kpi_id_to_delete)
-
-    # Delete a Collector by collector_id
-    collector_id_to_delete = '123e4567-e89b-12d3-a456-426614174001'
-    db_manager.delete_collector_by_id(collector_id_to_delete)
diff --git a/src/telemetry/frontend/Dockerfile b/src/telemetry/frontend/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..7125d31fe74f7c44a52c2783369c2dc7a4a31160
--- /dev/null
+++ b/src/telemetry/frontend/Dockerfile
@@ -0,0 +1,70 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/telemetry/frontend
+WORKDIR /var/teraflow/telemetry/frontend
+COPY src/telemetry/frontend/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/telemetry/__init__.py telemetry/__init__.py
+COPY src/telemetry/frontend/. telemetry/frontend/
+COPY src/telemetry/database/. telemetry/database/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "telemetry.frontend.service"]
diff --git a/src/telemetry/frontend/requirements.in b/src/telemetry/frontend/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..231dc04e820387c95ffea72cbe67b9f0a9a0865a
--- /dev/null
+++ b/src/telemetry/frontend/requirements.in
@@ -0,0 +1,19 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+confluent-kafka==2.3.*
+psycopg2-binary==2.9.*
+SQLAlchemy==1.4.*
+sqlalchemy-cockroachdb==1.4.*
+SQLAlchemy-Utils==0.38.*
diff --git a/src/telemetry/frontend/service/TelemetryFrontendService.py b/src/telemetry/frontend/service/TelemetryFrontendService.py
index dc3f8df363a882db0f0ba3112a38f3bba3921c30..abd361aa0082e2de1d1f5fa7e81a336f3091af9a 100644
--- a/src/telemetry/frontend/service/TelemetryFrontendService.py
+++ b/src/telemetry/frontend/service/TelemetryFrontendService.py
@@ -14,17 +14,16 @@
 
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
-from monitoring.service.NameMapping import NameMapping
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from common.proto.telemetry_frontend_pb2_grpc import add_TelemetryFrontendServiceServicer_to_server
 from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl
 
 
 class TelemetryFrontendService(GenericGrpcService):
-    def __init__(self, name_mapping : NameMapping, cls_name: str = __name__) -> None:
+    def __init__(self, cls_name: str = __name__) -> None:
         port = get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND)
         super().__init__(port, cls_name=cls_name)
-        self.telemetry_frontend_servicer = TelemetryFrontendServiceServicerImpl(name_mapping)
+        self.telemetry_frontend_servicer = TelemetryFrontendServiceServicerImpl()
 
     def install_servicers(self):
         add_TelemetryFrontendServiceServicer_to_server(self.telemetry_frontend_servicer, self.server)
diff --git a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
index e6830ad676d3934c88b01575ebdd1d0549fb00d1..b73d9fa952ee42aeb7adb8f3c0b2e4a3ba7f3e09 100644
--- a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
+++ b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
@@ -12,126 +12,167 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import ast
+import json
 import threading
-import time
-from typing import Tuple, Any
+from typing import Any, Dict
 import grpc
 import logging
 
-from confluent_kafka import Consumer as KafkaConsumer
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
 from common.proto.context_pb2 import Empty
-from monitoring.service.NameMapping import NameMapping
-from confluent_kafka import Producer as KafkaProducer
-from confluent_kafka import KafkaException
-from confluent_kafka import KafkaError
 from common.proto.telemetry_frontend_pb2 import CollectorId, Collector, CollectorFilter, CollectorList
-from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 from common.proto.telemetry_frontend_pb2_grpc import TelemetryFrontendServiceServicer
 
 from telemetry.database.TelemetryModel import Collector as CollectorModel
-from telemetry.database.managementDB import managementDB
+from telemetry.database.Telemetry_DB import TelemetryDB
+
+from confluent_kafka import Consumer as KafkaConsumer
+from confluent_kafka import Producer as KafkaProducer
+from confluent_kafka import KafkaError
+
 
 LOGGER            = logging.getLogger(__name__)
-METRICS_POOL      = MetricsPool('Monitoring', 'TelemetryFrontend')
-KAFKA_SERVER_IP   = '127.0.0.1:9092'
-ACTIVE_COLLECTORS = []
-KAFKA_TOPICS      = {'request' : 'topic_request', 
-                     'response': 'topic_response'}
+METRICS_POOL      = MetricsPool('TelemetryFrontend', 'NBIgRPC')
+ACTIVE_COLLECTORS = []       # in-memory list of active collector IDs; could be populated from the DB on startup
 
 
 class TelemetryFrontendServiceServicerImpl(TelemetryFrontendServiceServicer):
-    def __init__(self, name_mapping : NameMapping):
+    def __init__(self):
         LOGGER.info('Init TelemetryFrontendService')
-        self.managementDBobj = managementDB()
-        self.kafka_producer = KafkaProducer({'bootstrap.servers': KAFKA_SERVER_IP,})
-        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KAFKA_SERVER_IP,
-                                            'group.id'          : 'frontend',
-                                            'auto.offset.reset' : 'latest'})
-
-    def add_collector_to_db(self, request: Collector ): # type: ignore
-        try:
-            # Create a new Collector instance
-            collector_to_insert                     = CollectorModel()
-            collector_to_insert.collector_id        = request.collector_id.collector_id.uuid
-            collector_to_insert.kpi_id              = request.kpi_id.kpi_id.uuid
-            # collector_to_insert.collector_decription= request.collector
-            collector_to_insert.sampling_duration_s = request.duration_s
-            collector_to_insert.sampling_interval_s = request.interval_s
-            collector_to_insert.start_timestamp     = time.time()
-            collector_to_insert.end_timestamp       = time.time()
-            managementDB.add_row_to_db(collector_to_insert)
-        except Exception as e:
-            LOGGER.info("Unable to create collectorModel class object. {:}".format(e))
+        self.tele_db_obj = TelemetryDB()
+        self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()})
+        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(),
+                                            'group.id'           : 'frontend',
+                                            'auto.offset.reset'  : 'latest'})
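+        # The producer publishes collector start/stop requests (KafkaTopic.REQUEST);
+        # the consumer reads collector responses (KafkaTopic.RESPONSE) in ResponseListener().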
 
-    # @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def StartCollector(self, 
                        request : Collector, grpc_context: grpc.ServicerContext # type: ignore
                       ) -> CollectorId: # type: ignore
-        # push info to frontend db
         LOGGER.info ("gRPC message: {:}".format(request))
         response = CollectorId()
-        _collector_id       = str(request.collector_id.collector_id.uuid)
-        _collector_kpi_id   = str(request.kpi_id.kpi_id.uuid)
-        _collector_duration = int(request.duration_s)
-        _collector_interval = int(request.interval_s)
-        # pushing Collector to DB
-        self.add_collector_to_db(request)
-        self.publish_to_kafka_request_topic(_collector_id, _collector_kpi_id, _collector_duration, _collector_interval)
-        # self.run_publish_to_kafka_request_topic(_collector_id, _collector_kpi_id, _collector_duration, _collector_interval)
-        response.collector_id.uuid = request.collector_id.collector_id.uuid # type: ignore
+
+        # TODO: Verify that the KPI ID exists in the KPI DB, or document that it is assumed to exist.
+        self.tele_db_obj.add_row_to_db(
+            CollectorModel.ConvertCollectorToRow(request)
+        )
+        self.PublishStartRequestOnKafka(request)
+
+        response.collector_id.uuid = request.collector_id.collector_id.uuid
         return response
-    
-    def run_publish_to_kafka_request_topic(self, msg_key: str, kpi: str, duration : int, interval: int):
-        # Add threading.Thread() response to dictonary and call start() in the next statement
-        threading.Thread(target=self.publish_to_kafka_request_topic, args=(msg_key, kpi, duration, interval)).start()
-
-    def publish_to_kafka_request_topic(self, 
-                             collector_id: str, kpi: str, duration : int, interval: int
-                             ):
+
+    def PublishStartRequestOnKafka(self, collector_obj):
+        """
+        Publish a collector start request on the Kafka request topic.
+        """
+        collector_uuid = collector_obj.collector_id.collector_id.uuid
+        collector_to_generate :  Dict = {
+            "kpi_id"  : collector_obj.kpi_id.kpi_id.uuid,
+            "duration": collector_obj.duration_s,
+            "interval": collector_obj.interval_s
+        }
+        self.kafka_producer.produce(
+            KafkaTopic.REQUEST.value,
+            key      = collector_uuid,
+            value    = json.dumps(collector_to_generate),
+            callback = self.delivery_callback
+        )
+        LOGGER.info("Collector Request Generated: Collector Id: {:}, Value: {:}".format(collector_uuid, collector_to_generate))
+        ACTIVE_COLLECTORS.append(collector_uuid)
+        self.kafka_producer.flush()
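+        # Illustrative example of the message produced above (values are made up):
+        #   key   = "5d45f53f-d567-429f-9427-9196ac72ff0c"
+        #   value = '{"kpi_id": "...", "duration": 10.0, "interval": 2.0}'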
+
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StopCollector(self, 
+                      request : CollectorId, grpc_context: grpc.ServicerContext # type: ignore
+                     ) -> Empty:  # type: ignore
+        LOGGER.info ("gRPC message: {:}".format(request))
+        try:
+            collector_to_delete = request.collector_id.uuid
+            self.tele_db_obj.delete_db_row_by_id(
+                CollectorModel, "collector_id", collector_to_delete
+            )
+            self.PublishStopRequestOnKafka(request)
+        except Exception as e:
+            LOGGER.error('Unable to delete collector. Error: {:}'.format(e))
+        return Empty()
+
+    def PublishStopRequestOnKafka(self, collector_id):
         """
-        Method to generate collector request to Kafka topic.
+        Publish a collector stop request on the Kafka request topic.
         """
-        # time.sleep(5)
-        # producer_configs = {
-        #     'bootstrap.servers': KAFKA_SERVER_IP,
-        # }
-        # topic_request = "topic_request"
-        msg_value : Tuple [str, int, int] = (kpi, duration, interval)
-        # print ("Request generated: ", "Colletcor Id: ", collector_id, \
-        #         ", \nKPI: ", kpi, ", Duration: ", duration, ", Interval: ", interval)
-        # producerObj = KafkaProducer(producer_configs)
-        self.kafka_producer.produce(KAFKA_TOPICS['request'], key=collector_id, value= str(msg_value), callback=self.delivery_callback)
-        # producerObj.produce(KAFKA_TOPICS['request'], key=collector_id, value= str(msg_value), callback=self.delivery_callback)
-        LOGGER.info("Collector Request Generated: {:}, {:}, {:}, {:}".format(collector_id, kpi, duration, interval))
-        # producerObj.produce(topic_request, key=collector_id, value= str(msg_value), callback=self.delivery_callback)
-        ACTIVE_COLLECTORS.append(collector_id)
+        collector_uuid = collector_id.collector_id.uuid
+        collector_to_stop :  Dict = {
+            "kpi_id"  : collector_uuid,
+            "duration": -1,
+            "interval": -1
+        }
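+        # A duration/interval of -1 is the convention used to tell the backend to
+        # terminate the collector identified by the message key.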
+        self.kafka_producer.produce(
+            KafkaTopic.REQUEST.value,
+            key      = collector_uuid,
+            value    = json.dumps(collector_to_stop),
+            callback = self.delivery_callback
+        )
+        LOGGER.info("Collector Stop Request Generated: Collector Id: {:}, Value: {:}".format(collector_uuid, collector_to_stop))
+        try:
+            ACTIVE_COLLECTORS.remove(collector_uuid)
+        except ValueError:
+            LOGGER.warning('Collector ID {:} not found in active collector list'.format(collector_uuid))
         self.kafka_producer.flush()
 
-    def run_kafka_listener(self):
-        # print ("--- STARTED: run_kafka_listener ---")
-        threading.Thread(target=self.kafka_listener).start()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectCollectors(self, 
+                         request : CollectorFilter, grpc_context: grpc.ServicerContext # type: ignore
+                        ) -> CollectorList:  # type: ignore
+        LOGGER.info("gRPC message: {:}".format(request))
+        response = CollectorList()
+
+        try:
+            rows = self.tele_db_obj.select_with_filter(CollectorModel, request)
+            for row in rows:
+                collector_obj = CollectorModel.ConvertRowToCollector(row)
+                response.collector_list.append(collector_obj)
+        except Exception as e:
+            LOGGER.error('Unable to apply filter on collectors. Error: {:}'.format(e))
+        return response
+
+
+    def delivery_callback(self, err, msg):
+        """
+        Callback function to handle Kafka message delivery status.
+        Args:
+            err (KafkaError): Kafka error object (None on success).
+            msg (Message): Kafka message object.
+        """
+        if err:
+            LOGGER.error('Message delivery failed: {:}'.format(err))
+        # else:
+        #     LOGGER.debug('Message delivered to topic {:}'.format(msg.topic()))
+        #     print('Message delivered to topic {:}'.format(msg.topic()))
+
+    # ---------- Independent Method ---------------
+    # The response listener is independent of the gRPC methods; it has the same
+    # lifetime as the service and continuously listens for collector responses.
+    def RunResponseListener(self):
+        threading.Thread(target=self.ResponseListener).start()
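+        # Note: the thread is non-daemon by default, so it keeps running (and keeps
+        # the process alive) until the service process is terminated.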
         return True
 
-    def kafka_listener(self):
+    def ResponseListener(self):
         """
         listener for response on Kafka topic.
         """
-        # # print ("--- STARTED: kafka_listener ---")
-        # conusmer_configs = {
-        #     'bootstrap.servers' : KAFKA_SERVER_IP,
-        #     'group.id'          : 'frontend',
-        #     'auto.offset.reset' : 'latest'
-        # }
-        # # topic_response = "topic_response"
-
-        # consumerObj = KafkaConsumer(conusmer_configs)
-        self.kafka_consumer.subscribe([KAFKA_TOPICS['response']])
-        # print (time.time())
+        self.kafka_consumer.subscribe([KafkaTopic.RESPONSE.value])
         while True:
             receive_msg = self.kafka_consumer.poll(2.0)
             if receive_msg is None:
-                # print (" - Telemetry frontend listening on Kafka Topic: ", KAFKA_TOPICS['response'])     # added for debugging purposes
                 continue
             elif receive_msg.error():
                 if receive_msg.error().code() == KafkaError._PARTITION_EOF:
@@ -142,63 +183,16 @@ class TelemetryFrontendServiceServicerImpl(TelemetryFrontendServiceServicer):
             try:
                 collector_id = receive_msg.key().decode('utf-8')
                 if collector_id in ACTIVE_COLLECTORS:
-                    (kpi_id, kpi_value) = ast.literal_eval(receive_msg.value().decode('utf-8'))
-                    self.process_response(collector_id, kpi_id, kpi_value)
+                    kpi_value = json.loads(receive_msg.value().decode('utf-8'))
+                    self.process_response(collector_id, kpi_value['kpi_id'], kpi_value['kpi_value'])
                 else:
                     print(f"collector id does not match.\nRespone ID: '{collector_id}' --- Active IDs: '{ACTIVE_COLLECTORS}' ")
             except Exception as e:
-                print(f"No message key found: {str(e)}")
+                print(f"Error extarcting msg key or value: {str(e)}")
                 continue
-                # return None
 
     def process_response(self, collector_id: str, kpi_id: str, kpi_value: Any):
         if kpi_id == "-1" and kpi_value == -1:
-            # LOGGER.info("Sucessfully terminated Collector: {:}".format(collector_id))
-            print ("Sucessfully terminated Collector: ", collector_id)
+            print ("Backend termination confirmation for collector id: ", collector_id)
         else:
-            print ("Frontend-Received values Collector Id:", collector_id, "-KPI:", kpi_id, "-VALUE:", kpi_value)
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def delivery_callback(self, err, msg):
-        """
-        Callback function to handle message delivery status.
-        Args:
-            err (KafkaError): Kafka error object.
-            msg (Message): Kafka message object.
-        """
-        if err:
-            print(f'Message delivery failed: {err}')
-        else:
-            print(f'Message delivered to topic {msg.topic()}')
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def StopCollector(self, 
-                      request : CollectorId, grpc_context: grpc.ServicerContext # type: ignore
-                     ) -> Empty:  # type: ignore
-        LOGGER.info ("gRPC message: {:}".format(request))
-        _collector_id = request.collector_id.uuid
-        self.publish_to_kafka_request_topic(_collector_id, "", -1, -1)
-        return Empty()
-
-    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def SelectCollectors(self, 
-                         request : CollectorFilter, contextgrpc_context: grpc.ServicerContext # type: ignore
-                        ) -> CollectorList:  # type: ignore
-        LOGGER.info("gRPC message: {:}".format(request))
-        response = CollectorList()
-        filter_to_apply = dict()
-        filter_to_apply['kpi_id']       = request.kpi_id[0].kpi_id.uuid
-        # filter_to_apply['duration_s'] = request.duration_s[0]
-        try:
-            rows = self.managementDBobj.select_with_filter(CollectorModel, **filter_to_apply)
-        except Exception as e:
-            LOGGER.info('Unable to apply filter on kpi descriptor. {:}'.format(e))
-        try:
-            if len(rows) != 0:
-                for row in rows:
-                    collector_obj = Collector()
-                    collector_obj.collector_id.collector_id.uuid = row.collector_id
-                    response.collector_list.append(collector_obj)
-            return response
-        except Exception as e:
-            LOGGER.info('Unable to process response {:}'.format(e))
\ No newline at end of file
+            print ("KPI Value: Collector Id:", collector_id, ", Kpi Id:", kpi_id, ", Value:", kpi_value)
diff --git a/src/telemetry/frontend/service/__main__.py b/src/telemetry/frontend/service/__main__.py
index 3b0263706c3dad3756306d1ba8a3a104d568cd6f..2a6c5dbcf2da6b6a074c2b8ee23791bc4896442f 100644
--- a/src/telemetry/frontend/service/__main__.py
+++ b/src/telemetry/frontend/service/__main__.py
@@ -12,16 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import signal
-import sys
-import logging, threading
+import logging, signal, sys, threading
 from prometheus_client import start_http_server
-from monitoring.service.NameMapping import NameMapping
+from common.Settings import get_log_level, get_metrics_port
 from .TelemetryFrontendService import TelemetryFrontendService
-from monitoring.service.EventTools import EventsDeviceCollector
-from common.Settings import (
-    get_log_level, wait_for_environment_variables, get_env_var_name, 
-    get_metrics_port )
 
 terminate = threading.Event()
 LOGGER = None
@@ -31,20 +25,12 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
     terminate.set()
 
 def main():
-    global LOGGER
+    global LOGGER # pylint: disable=global-statement
 
     log_level = get_log_level()
     logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
     LOGGER = logging.getLogger(__name__)
 
-# ------- will be added later --------------
-    # wait_for_environment_variables([
-    #     get_env_var_name
-
-
-    # ])
-# ------- will be added later --------------
-
     signal.signal(signal.SIGINT,  signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
@@ -54,9 +40,7 @@ def main():
     metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
-    name_mapping = NameMapping()
-
-    grpc_service = TelemetryFrontendService(name_mapping)
+    grpc_service = TelemetryFrontendService()
     grpc_service.start()
 
     # Wait for Ctrl+C or termination signal
@@ -69,4 +53,4 @@ def main():
     return 0
 
 if __name__ == '__main__':
-    sys.exit(main())
\ No newline at end of file
+    sys.exit(main())
diff --git a/src/telemetry/frontend/tests/Messages.py b/src/telemetry/frontend/tests/Messages.py
index 1205898d13a610cd262979242e4f489f5e35cdb8..a0e93e8a121b9efaac83f7169419911c8ee6e3ea 100644
--- a/src/telemetry/frontend/tests/Messages.py
+++ b/src/telemetry/frontend/tests/Messages.py
@@ -16,68 +16,27 @@ import uuid
 import random
 from common.proto import telemetry_frontend_pb2
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
-
+from common.proto.kpi_manager_pb2 import KpiId
 
 # ----------------------- "2nd" Iteration --------------------------------
 def create_collector_id():
     _collector_id                   = telemetry_frontend_pb2.CollectorId()
-    _collector_id.collector_id.uuid = uuid.uuid4()
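+    # A fixed UUID is used here, presumably so that StopCollector tests can target a
+    # known collector ID instead of a random one.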
+    # _collector_id.collector_id.uuid = str(uuid.uuid4())
+    _collector_id.collector_id.uuid = "5d45f53f-d567-429f-9427-9196ac72ff0c"
     return _collector_id
 
-# def create_collector_id_a(coll_id_str : str):
-#     _collector_id                   = telemetry_frontend_pb2.CollectorId()
-#     _collector_id.collector_id.uuid = str(coll_id_str)
-#     return _collector_id
-
 def create_collector_request():
     _create_collector_request                                = telemetry_frontend_pb2.Collector()
     _create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4())
-    _create_collector_request.kpi_id.kpi_id.uuid             = "165d20c5-a446-42fa-812f-e2b7ed283c6f"
-    # _create_collector_request.collector                      = "collector description"
+    _create_collector_request.kpi_id.kpi_id.uuid             = str(uuid.uuid4())
     _create_collector_request.duration_s                     = float(random.randint(8, 16))
     _create_collector_request.interval_s                     = float(random.randint(2, 4))
     return _create_collector_request
 
 def create_collector_filter():
     _create_collector_filter = telemetry_frontend_pb2.CollectorFilter()
-    new_kpi_id               = _create_collector_filter.kpi_id.add()
-    new_kpi_id.kpi_id.uuid   = "165d20c5-a446-42fa-812f-e2b7ed283c6f"
+    kpi_id_obj               = KpiId()
+    # kpi_id_obj.kpi_id.uuid   = str(uuid.uuid4())
+    kpi_id_obj.kpi_id.uuid   = "a7237fa3-caf4-479d-84b6-4d9f9738fb7f"
+    _create_collector_filter.kpi_id.append(kpi_id_obj)
     return _create_collector_filter
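+# Illustrative note: the resulting CollectorFilter carries a single KpiId, which
+# SelectCollectors() uses as the filter when querying collectors from the DB.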
-
-# ----------------------- "First" Iteration --------------------------------
-# def create_collector_request_a():
-#     _create_collector_request_a                                = telemetry_frontend_pb2.Collector()
-#     _create_collector_request_a.collector_id.collector_id.uuid = "-1"
-#     return _create_collector_request_a
-
-# def create_collector_request_b(str_kpi_id, coll_duration_s, coll_interval_s
-#                                ) -> telemetry_frontend_pb2.Collector:
-#     _create_collector_request_b                                = telemetry_frontend_pb2.Collector()
-#     _create_collector_request_b.collector_id.collector_id.uuid = '1'
-#     _create_collector_request_b.kpi_id.kpi_id.uuid             = str_kpi_id
-#     _create_collector_request_b.duration_s                     = coll_duration_s
-#     _create_collector_request_b.interval_s                     = coll_interval_s
-#     return _create_collector_request_b
-
-# def create_collector_filter():
-#     _create_collector_filter = telemetry_frontend_pb2.CollectorFilter()
-#     new_collector_id                       = _create_collector_filter.collector_id.add()
-#     new_collector_id.collector_id.uuid     = "COLL1"
-#     new_kpi_id                             = _create_collector_filter.kpi_id.add()
-#     new_kpi_id.kpi_id.uuid                 = "KPI1"
-#     new_device_id                          = _create_collector_filter.device_id.add()
-#     new_device_id.device_uuid.uuid         = 'DEV1'
-#     new_service_id                         = _create_collector_filter.service_id.add()
-#     new_service_id.service_uuid.uuid       = 'SERV1'
-#     new_slice_id                           = _create_collector_filter.slice_id.add()
-#     new_slice_id.slice_uuid.uuid           = 'SLC1'
-#     new_endpoint_id                        = _create_collector_filter.endpoint_id.add()
-#     new_endpoint_id.endpoint_uuid.uuid     = 'END1'
-#     new_connection_id                      = _create_collector_filter.connection_id.add()
-#     new_connection_id.connection_uuid.uuid = 'CON1'
-#     _create_collector_filter.kpi_sample_type.append(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED)
-#     return _create_collector_filter
-
-# def create_collector_list():
-#     _create_collector_list = telemetry_frontend_pb2.CollectorList()
-#     return _create_collector_list
\ No newline at end of file
diff --git a/src/telemetry/frontend/tests/test_frontend.py b/src/telemetry/frontend/tests/test_frontend.py
index 002cc430721845aa5aa18274375e2c22b5d77ff7..9c3f9d3a8f545792eb2bb3a371c6c20664d24f69 100644
--- a/src/telemetry/frontend/tests/test_frontend.py
+++ b/src/telemetry/frontend/tests/test_frontend.py
@@ -13,129 +13,40 @@
 # limitations under the License.
 
 import os
-import time
 import pytest
 import logging
-from typing import Union
 
-from common.proto.context_pb2 import Empty
 from common.Constants import ServiceNameEnum
 from common.proto.telemetry_frontend_pb2 import CollectorId, CollectorList
-from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
-from context.client.ContextClient import ContextClient
-from common.tools.service.GenericGrpcService import GenericGrpcService
-from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.proto.context_pb2 import Empty
+from common.tools.kafka.Variables import KafkaTopic
 from common.Settings import ( 
     get_service_port_grpc, get_env_var_name, ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC)
 
 from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient
 from telemetry.frontend.service.TelemetryFrontendService import TelemetryFrontendService
+from telemetry.frontend.tests.Messages import (
+     create_collector_request, create_collector_id, create_collector_filter)
 from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl
-from telemetry.frontend.tests.Messages import ( create_collector_request, create_collector_filter)
-from telemetry.database.managementDB import managementDB
-from telemetry.database.TelemetryEngine import TelemetryEngine
-
-from device.client.DeviceClient import DeviceClient
-from device.service.DeviceService import DeviceService
-from device.service.driver_api.DriverFactory import DriverFactory
-from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
 
-from monitoring.service.NameMapping import NameMapping
-
-os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
-from device.service.drivers import DRIVERS
 
 ###########################
 # Tests Setup
 ###########################
 
 LOCAL_HOST = '127.0.0.1'
-MOCKSERVICE_PORT = 10000
 
-TELEMETRY_FRONTEND_PORT = str(MOCKSERVICE_PORT) + str(get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND))
+TELEMETRY_FRONTEND_PORT = str(get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND))
 os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
 os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(TELEMETRY_FRONTEND_PORT)
 
 LOGGER = logging.getLogger(__name__)
 
-class MockContextService(GenericGrpcService):
-    # Mock Service implementing Context to simplify unitary tests of Monitoring
-
-    def __init__(self, bind_port: Union[str, int]) -> None:
-        super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')
-
-    # pylint: disable=attribute-defined-outside-init
-    def install_servicers(self):
-        self.context_servicer = MockServicerImpl_Context()
-        add_ContextServiceServicer_to_server(self.context_servicer, self.server)
-
 @pytest.fixture(scope='session')
-def context_service():
-    LOGGER.info('Initializing MockContextService...')
-    _service = MockContextService(MOCKSERVICE_PORT)
-    _service.start()
-    
-    LOGGER.info('Yielding MockContextService...')
-    yield _service
-
-    LOGGER.info('Terminating MockContextService...')
-    _service.context_servicer.msg_broker.terminate()
-    _service.stop()
-
-    LOGGER.info('Terminated MockContextService...')
-
-@pytest.fixture(scope='session')
-def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
-    LOGGER.info('Initializing ContextClient...')
-    _client = ContextClient()
-    
-    LOGGER.info('Yielding ContextClient...')
-    yield _client
-
-    LOGGER.info('Closing ContextClient...')
-    _client.close()
-
-    LOGGER.info('Closed ContextClient...')
-
-@pytest.fixture(scope='session')
-def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
-    LOGGER.info('Initializing DeviceService...')
-    driver_factory = DriverFactory(DRIVERS)
-    driver_instance_cache = DriverInstanceCache(driver_factory)
-    _service = DeviceService(driver_instance_cache)
-    _service.start()
-
-    # yield the server, when test finishes, execution will resume to stop it
-    LOGGER.info('Yielding DeviceService...')
-    yield _service
-
-    LOGGER.info('Terminating DeviceService...')
-    _service.stop()
-
-    LOGGER.info('Terminated DeviceService...')
-
-@pytest.fixture(scope='session')
-def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument
-    LOGGER.info('Initializing DeviceClient...')
-    _client = DeviceClient()
-
-    LOGGER.info('Yielding DeviceClient...')
-    yield _client
-
-    LOGGER.info('Closing DeviceClient...')
-    _client.close()
-
-    LOGGER.info('Closed DeviceClient...')
-
-@pytest.fixture(scope='session')
-def telemetryFrontend_service(
-        context_service : MockContextService,
-        device_service  : DeviceService
-    ):
+def telemetryFrontend_service():
     LOGGER.info('Initializing TelemetryFrontendService...')
-    name_mapping = NameMapping()
 
-    _service = TelemetryFrontendService(name_mapping)
+    _service = TelemetryFrontendService()
     _service.start()
 
     # yield the server, when test finishes, execution will resume to stop it
@@ -168,37 +79,73 @@ def telemetryFrontend_client(
 # Tests Implementation of Telemetry Frontend
 ###########################
 
-def test_verify_db_and_table():
-    LOGGER.info(' >>> test_verify_database_and_tables START: <<< ')
-    _engine = TelemetryEngine.get_engine()
-    managementDB.create_database(_engine)
-    managementDB.create_tables(_engine)
+# ------- Re-structuring Test ---------
+# --- "test_validate_kafka_topics" should be run before the functionality tests ---
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
 
+# ----- core functionality tests -----
 def test_StartCollector(telemetryFrontend_client):
     LOGGER.info(' >>> test_StartCollector START: <<< ')
     response = telemetryFrontend_client.StartCollector(create_collector_request())
     LOGGER.debug(str(response))
     assert isinstance(response, CollectorId)
 
-def test_run_kafka_listener():
-    LOGGER.info(' >>> test_run_kafka_listener START: <<< ')
-    name_mapping = NameMapping()
-    TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl(name_mapping)
-    response = TelemetryFrontendServiceObj.run_kafka_listener()     # Method "run_kafka_listener" is not define in frontend.proto
-    LOGGER.debug(str(response))
-    assert isinstance(response, bool)
-
 def test_StopCollector(telemetryFrontend_client):
     LOGGER.info(' >>> test_StopCollector START: <<< ')
-    _collector_id = telemetryFrontend_client.StartCollector(create_collector_request())
-    time.sleep(3)   # wait for small amount before call the stopCollecter()
-    response = telemetryFrontend_client.StopCollector(_collector_id)
+    response = telemetryFrontend_client.StopCollector(create_collector_id())
     LOGGER.debug(str(response))
     assert isinstance(response, Empty)
 
-def test_select_collectors(telemetryFrontend_client):
-    LOGGER.info(' >>> test_select_collector requesting <<< ')
+def test_SelectCollectors(telemetryFrontend_client):
+    LOGGER.info(' >>> test_SelectCollectors START: <<< ')
     response = telemetryFrontend_client.SelectCollectors(create_collector_filter())
-    LOGGER.info('Received Rows after applying Filter: {:} '.format(response))
     LOGGER.debug(str(response))
-    assert isinstance(response, CollectorList)
\ No newline at end of file
+    assert isinstance(response, CollectorList)
+
+# ----- Non-gRPC method tests ----- 
+def test_RunResponseListener():
+    LOGGER.info(' >>> test_RunResponseListener START: <<< ')
+    TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl()
+    response = TelemetryFrontendServiceObj.RunResponseListener()     # called directly because "RunResponseListener" is not defined in telemetry_frontend.proto
+    LOGGER.debug(str(response))
+    assert isinstance(response, bool)
+
diff --git a/src/telemetry/telemetry_virenv.txt b/src/telemetry/telemetry_virenv.txt
deleted file mode 100644
index e39f80b6593d6c41411751cdd0ea59ee05344570..0000000000000000000000000000000000000000
--- a/src/telemetry/telemetry_virenv.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-anytree==2.8.0
-APScheduler==3.10.1
-attrs==23.2.0
-certifi==2024.2.2
-charset-normalizer==2.0.12
-colorama==0.4.6
-confluent-kafka==2.3.0
-coverage==6.3
-future-fstrings==1.2.0
-greenlet==3.0.3
-grpcio==1.47.5
-grpcio-health-checking==1.47.5
-grpcio-tools==1.47.5
-grpclib==0.4.4
-h2==4.1.0
-hpack==4.0.0
-hyperframe==6.0.1
-idna==3.7
-influx-line-protocol==0.1.4
-iniconfig==2.0.0
-kafka-python==2.0.2
-multidict==6.0.5
-networkx==3.3
-packaging==24.0
-pluggy==1.5.0
-prettytable==3.5.0
-prometheus-client==0.13.0
-protobuf==3.20.3
-psycopg2-binary==2.9.3
-py==1.11.0
-py-cpuinfo==9.0.0
-pytest==6.2.5
-pytest-benchmark==3.4.1
-pytest-depends==1.0.1
-python-dateutil==2.8.2
-python-json-logger==2.0.2
-pytz==2024.1
-questdb==1.0.1
-requests==2.27.1
-six==1.16.0
-SQLAlchemy==1.4.52
-sqlalchemy-cockroachdb==1.4.4
-SQLAlchemy-Utils==0.38.3
-toml==0.10.2
-typing_extensions==4.12.0
-tzlocal==5.2
-urllib3==1.26.18
-wcwidth==0.2.13
-xmltodict==0.12.0
diff --git a/src/telemetry/database/tests/messages.py b/src/telemetry/tests/messages.py
similarity index 100%
rename from src/telemetry/database/tests/messages.py
rename to src/telemetry/tests/messages.py
diff --git a/src/telemetry/tests/test_telemetryDB.py b/src/telemetry/tests/test_telemetryDB.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4976f8c2144fcdcad43a3e25d43091010de0d18
--- /dev/null
+++ b/src/telemetry/tests/test_telemetryDB.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging
+from telemetry.database.Telemetry_DB import TelemetryDB
+
+LOGGER = logging.getLogger(__name__)
+
+def test_verify_databases_and_tables():
+    LOGGER.info('>>> test_verify_databases_and_tables : START <<< ')
+    TelemetryDBobj = TelemetryDB()
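+    # The assertions here are implicit: the test passes as long as none of the DB
+    # helper calls below raise an exception.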
+    TelemetryDBobj.drop_database()
+    TelemetryDBobj.verify_tables()
+    TelemetryDBobj.create_database()
+    TelemetryDBobj.create_tables()
+    TelemetryDBobj.verify_tables()
\ No newline at end of file