diff --git a/my_deploy.sh b/my_deploy.sh
index 8241cd72689cf7ebfeda66b9335296a91f2a55f0..dfb0c1b000180cb38992522e43565b43e4ff4d22 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -26,7 +26,7 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_gene
 #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
 
 # Uncomment to activate Monitoring Framework (new)
-#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics"
 
 # Uncomment to activate BGP-LS Speaker
 #export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
diff --git a/scripts/run_tests_locally-analytics-backend.sh b/scripts/run_tests_locally-analytics-backend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..99a789c9fda481942b904a2e805202642123835c
--- /dev/null
+++ b/scripts/run_tests_locally-analytics-backend.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    analytics/backend/tests/test_backend.py
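The exported CRDB_URI is the only contract between this helper script and the tests it drives: the test session rebuilds its database engine from that variable. Below is a minimal sketch of the consuming side, assuming SQLAlchemy 1.4 plus the sqlalchemy-cockroachdb dialect are installed (as the service requirements suggest); the fallback address is illustrative only.

```python
# Sketch: how a test can pick up the CRDB_URI exported by the script above.
# Assumes SQLAlchemy 1.4 and the sqlalchemy-cockroachdb dialect; the default
# used when the variable is unset is a hypothetical local address.
import os
import sqlalchemy

def get_test_engine():
    crdb_uri = os.environ.get(
        'CRDB_URI',
        'cockroachdb://tfs:tfs123@127.0.0.1:26257/tfs_kpi_mgmt?sslmode=require')
    return sqlalchemy.create_engine(crdb_uri, echo=False)
```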
diff --git a/scripts/run_tests_locally-kpi-DB.sh b/scripts/run_tests_locally-kpi-DB.sh
index 4953b49e0a437becfda1648c722bcdcf92c58d93..ad1b4c57b6632266d539db07637fb1c0b024cf36 100755
--- a/scripts/run_tests_locally-kpi-DB.sh
+++ b/scripts/run_tests_locally-kpi-DB.sh
@@ -24,7 +24,7 @@ cd $PROJECTDIR/src
 # python3 kpi_manager/tests/test_unitary.py
 
 RCFILE=$PROJECTDIR/coverage/.coveragerc
-CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
+CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace ${CRDB_NAMESPACE} -o 'jsonpath={.spec.clusterIP}')
 export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
     kpi_manager/tests/test_kpi_db.py
diff --git a/scripts/run_tests_locally-telemetry-DB.sh b/scripts/run_tests_locally-telemetry-DB.sh
index 4b9a417603cc42a4e7e8b19c7394cc38633817fa..529774f2da703347abafdc168b932ef4ac29bed1 100755
--- a/scripts/run_tests_locally-telemetry-DB.sh
+++ b/scripts/run_tests_locally-telemetry-DB.sh
@@ -20,7 +20,8 @@ cd $PROJECTDIR/src
 # RCFILE=$PROJECTDIR/coverage/.coveragerc
 # coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
 #     kpi_manager/tests/test_unitary.py
-
+CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 python3 -m pytest --log-level=DEBUG --log-cli-level=debug --verbose \
     telemetry/tests/test_telemetryDB.py
diff --git a/scripts/run_tests_locally-telemetry-backend.sh b/scripts/run_tests_locally-telemetry-backend.sh
index 79db05fcf1259365e8a909ee99395eb59dfb9437..4867335a53ee17c1fa279b2ee6bcf2bbac6bd1ba 100755
--- a/scripts/run_tests_locally-telemetry-backend.sh
+++ b/scripts/run_tests_locally-telemetry-backend.sh
@@ -22,7 +22,8 @@ cd $PROJECTDIR/src
 #     kpi_manager/tests/test_unitary.py
 
 # python3 kpi_manager/tests/test_unitary.py
-
+CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 python3 -m pytest --log-level=INFO --log-cli-level=debug --verbose \
-    telemetry/backend/tests/test_TelemetryBackend.py
+    telemetry/backend/tests/test_backend.py
diff --git a/scripts/run_tests_locally-telemetry-frontend.sh b/scripts/run_tests_locally-telemetry-frontend.sh
index a2a1de52340cac527d4d1c446c76740d38ce7783..0ed828310e2adbaf3a61e5d0e8a0a8e2283452d4 100755
--- a/scripts/run_tests_locally-telemetry-frontend.sh
+++ b/scripts/run_tests_locally-telemetry-frontend.sh
@@ -22,7 +22,8 @@ cd $PROJECTDIR/src
 #     kpi_manager/tests/test_unitary.py
 
 # python3 kpi_manager/tests/test_unitary.py
-
+CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
     telemetry/frontend/tests/test_frontend.py
diff --git a/scripts/show_logs_telemetry-backend.sh b/scripts/show_logs_telemetry-backend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c28083dcbf5c7056145d1a0696116da66b5e9828
--- /dev/null
+++ b/scripts/show_logs_telemetry-backend.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/telemetryservice -c backend
diff --git a/scripts/show_logs_telemetry-frontend.sh b/scripts/show_logs_telemetry-frontend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..821dc275b22ebf7ffc63d2e8c41dfab684407895
--- /dev/null
+++ b/scripts/show_logs_telemetry-frontend.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/telemetryservice -c frontend
diff --git a/src/analytics/.gitlab-ci.yml b/src/analytics/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..33ea9f3cf4534e02f40aec13420a4839c86a0d10
--- /dev/null
+++ b/src/analytics/.gitlab-ci.yml
@@ -0,0 +1,203 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build analytics:
+  variables:
+    IMAGE_NAME: 'analytics'       # name of the microservice
+    IMAGE_TAG: 'latest'             # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    # This first build tags the builder resulting image to prevent being removed by dangling image removal command
+    # - docker buildx build -t "${IMAGE_NAME}-backend:${IMAGE_TAG}-builder" --target builder -f ./src/$IMAGE_NAME/backend/Dockerfile .
+    - docker buildx build -t "${IMAGE_NAME}-frontend:$IMAGE_TAG" -f ./src/$IMAGE_NAME/frontend/Dockerfile .
+    - docker buildx build -t "${IMAGE_NAME}-backend:$IMAGE_TAG" -f ./src/$IMAGE_NAME/backend/Dockerfile .
+    - docker tag "${IMAGE_NAME}-frontend:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG"
+    - docker tag "${IMAGE_NAME}-backend:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/.gitlab-ci.yml
+      - src/$IMAGE_NAME/frontend/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/frontend/Dockerfile
+      - src/$IMAGE_NAME/frontend/tests/*.py
+      - src/$IMAGE_NAME/backend/Dockerfile
+      - src/$IMAGE_NAME/backend/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/backend/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit_test analytics-backend:
+  variables:
+    IMAGE_NAME: 'analytics' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build analytics
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi
+    - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi
+    - if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend container is not in the system"; fi
+    - docker container prune -f
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG"
+    - docker pull "bitnami/zookeeper:latest"
+    - docker pull "bitnami/kafka:latest"
+    - >
+      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      bitnami/zookeeper:latest
+    - sleep 10 # Wait for Zookeeper to start
+    - >
+      docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+      --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
+      --env ALLOW_PLAINTEXT_LISTENER=yes
+      bitnami/kafka:latest
+    - sleep 10 # Wait for Kafka to start
+    - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $KAFKA_IP
+    - >
+      docker run --name $IMAGE_NAME-backend -d -p 30060:30060
+      --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092"
+      --volume "$PWD/src/$IMAGE_NAME/backend/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/${IMAGE_NAME}-backend:$IMAGE_TAG
+    - docker ps -a
+    - sleep 5
+    - docker logs ${IMAGE_NAME}-backend
+    - >
+      docker exec -i ${IMAGE_NAME}-backend bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-backend_report.xml $IMAGE_NAME/backend/tests/test_*.py"
+    - docker exec -i ${IMAGE_NAME}-backend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
+    - docker rm -f ${IMAGE_NAME}-backend
+    - docker rm -f zookeeper
+    - docker rm -f kafka
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/backend/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/backend/Dockerfile
+      - src/$IMAGE_NAME/backend/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/backend/tests/${IMAGE_NAME}-backend_report.xml
+
+# Apply unit test to the component
+unit_test analytics-frontend:
+  variables:
+    IMAGE_NAME: 'analytics' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build analytics
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
+    - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
+    - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi
+    - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi
+    - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi
+    - docker container prune -f
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG"
+    - docker pull "bitnami/zookeeper:latest"
+    - docker pull "bitnami/kafka:latest"
+    - docker pull "cockroachdb/cockroach:latest-v22.2"
+    - docker volume create crdb
+    - >
+      docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080
+      --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123
+      --volume "crdb:/cockroach/cockroach-data"
+      cockroachdb/cockroach:latest-v22.2 start-single-node
+    - echo "Waiting for initialization..."
+    - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
+    # - docker logs crdb
+    # - docker ps -a
+    - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $CRDB_ADDRESS
+    - >
+      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 \
+      -e ALLOW_ANONYMOUS_LOGIN=yes \
+      bitnami/zookeeper:latest
+    - sleep 10 # Wait for Zookeeper to start
+    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+      --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
+      --env ALLOW_PLAINTEXT_LISTENER=yes
+      bitnami/kafka:latest
+    - sleep 10 # Wait for Kafka to start
+    - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $KAFKA_IP
+    # - docker logs zookeeper
+    # - docker logs kafka
+    - >
+      docker run --name $IMAGE_NAME-frontend -d -p 30050:30050
+      --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
+      --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092"
+      --volume "$PWD/src/$IMAGE_NAME/frontend/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/${IMAGE_NAME}-frontend:$IMAGE_TAG
+    - docker ps -a
+    - sleep 5
+    - docker logs ${IMAGE_NAME}-frontend
+    - >
+      docker exec -i ${IMAGE_NAME}-frontend bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml $IMAGE_NAME/frontend/tests/test_*.py"
+    - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker volume rm -f crdb
+    - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
+    - docker rm -f ${IMAGE_NAME}-frontend
+    - docker rm -f zookeeper
+    - docker rm -f kafka
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/frontend/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/frontend/Dockerfile
+      - src/$IMAGE_NAME/frontend/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml
\ No newline at end of file
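The fixed `sleep 10` waits in the jobs above are best-effort; if the pipeline proves flaky, a readiness probe against the broker is a drop-in alternative. Here is a sketch using confluent-kafka's AdminClient (already a pinned dependency of these components); the bootstrap address is whatever the job derived via `docker inspect` (KAFKA_IP) plus port 9092.

```python
# Sketch: poll broker metadata instead of sleeping a fixed 10 seconds.
# Assumes confluent-kafka is installed; `bootstrap` is the address the CI
# job derives with `docker inspect` plus port 9092.
import time
from confluent_kafka.admin import AdminClient

def wait_for_kafka(bootstrap: str, timeout_s: int = 60) -> bool:
    admin = AdminClient({'bootstrap.servers': bootstrap})
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        try:
            admin.list_topics(timeout=5)  # raises while no broker is reachable
            return True
        except Exception:
            time.sleep(2)
    return False
```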
diff --git a/src/analytics/backend/Dockerfile b/src/analytics/backend/Dockerfile
index df5cd7fbde6dc45780fdb3a333594ab11c1ab146..ef49657cdc270153b5de416cf46bef35bf2c04e6 100644
--- a/src/analytics/backend/Dockerfile
+++ b/src/analytics/backend/Dockerfile
@@ -59,7 +59,7 @@ RUN apt-get update && \
     apt-get clean
 
 # Set JAVA_HOME environment variable
-ENV JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64
+ENV JAVA_HOME=/usr/lib/jvm/java-17-openjdk-amd64
 ENV PATH=$JAVA_HOME/bin:$PATH
 
 # Create component sub-folders, get specific Python packages
diff --git a/src/analytics/backend/service/AnalyticsBackendService.py b/src/analytics/backend/service/AnalyticsBackendService.py
index 658d237956b4ed3addbbc295ef0d19dd4b977257..96b1f52428a2e612f7869ca90e878f4774cf6b8a 100755
--- a/src/analytics/backend/service/AnalyticsBackendService.py
+++ b/src/analytics/backend/service/AnalyticsBackendService.py
@@ -46,10 +46,10 @@ class AnalyticsBackendService(GenericGrpcService):
         thresholds    = analyzer['thresholds']
         window_size   = analyzer['window_size']
         window_slider = analyzer['window_slider']
-        print ("Received parameters: {:} - {:} - {:} - {:} - {:}".format(
-            kpi_list, oper_list, thresholds, window_size, window_slider))
-        LOGGER.debug ("Received parameters: {:} - {:} - {:} - {:} - {:}".format(
-            kpi_list, oper_list, thresholds, window_size, window_slider))
+        # print ("Received parameters: {:} - {:} - {:} - {:} - {:}".format(
+        #     kpi_list, oper_list, thresholds, window_size, window_slider))
+        # LOGGER.debug ("Received parameters: {:} - {:} - {:} - {:} - {:}".format(
+        #     kpi_list, oper_list, thresholds, window_size, window_slider))
         try:
             stop_event = threading.Event()
             thread = threading.Thread(target=SparkStreamer, 
@@ -86,6 +86,7 @@ class AnalyticsBackendService(GenericGrpcService):
         listener for requests on Kafka topic.
         """
         LOGGER.info("Request Listener is initiated ...")
+        print      ("Request Listener is initiated ...")
         consumer = self.kafka_consumer
         consumer.subscribe([KafkaTopic.ANALYTICS_REQUEST.value])
         while True:
@@ -98,17 +99,19 @@ class AnalyticsBackendService(GenericGrpcService):
                 else:
                     print("Consumer error: {}".format(receive_msg.error()))
                     break
-            analyzer      = json.loads(receive_msg.value().decode('utf-8'))
-            analyzer_uuid = receive_msg.key().decode('utf-8')
-            LOGGER.debug('Recevied Analyzer: {:} - {:}'.format(analyzer_uuid, analyzer))
-            print       ('Recevied Analyzer: {:} - {:}'.format(analyzer_uuid, analyzer))
+            try:
+                analyzer      = json.loads(receive_msg.value().decode('utf-8'))
+                analyzer_uuid = receive_msg.key().decode('utf-8')
+                LOGGER.debug('Received Analyzer: {:} - {:}'.format(analyzer_uuid, analyzer))
+                print       ('Received Analyzer: {:} - {:}'.format(analyzer_uuid, analyzer))
 
-            if analyzer["algo_name"] is None and analyzer["oper_mode"] is None:
-                self.TerminateAnalyzerBackend(analyzer_uuid)
-            else:
-                self.StartSparkStreamer(analyzer_uuid, analyzer)
-        LOGGER.debug("Stop Event activated. Terminating...")
-        print       ("Stop Event activated. Terminating...")
+                if analyzer["algo_name"] is None and analyzer["oper_mode"] is None:
+                    self.TerminateAnalyzerBackend(analyzer_uuid)
+                else:
+                    self.StartSparkStreamer(analyzer_uuid, analyzer)
+            except Exception as e:
+                LOGGER.warning("Unable to consume message from topic: {:}. ERROR: {:}".format(KafkaTopic.ANALYTICS_REQUEST.value, e))
+                print         ("Unable to consume message from topic: {:}. ERROR: {:}".format(KafkaTopic.ANALYTICS_REQUEST.value, e))
 
     def TerminateAnalyzerBackend(self, analyzer_uuid):
         if analyzer_uuid in self.running_threads:
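The hunk above moves the per-message handling inside a try/except so a single malformed Kafka payload logs a warning instead of killing the listener. The pattern, reduced to its essentials, is sketched below; `handle` is a hypothetical stand-in for the StartSparkStreamer/TerminateAnalyzerBackend dispatch.

```python
# Sketch of the defensive consume loop introduced above. `consumer` is a
# confluent-kafka Consumer already subscribed to the request topic; handle()
# is a hypothetical stand-in for the start/terminate dispatch.
import json
import logging

LOGGER = logging.getLogger(__name__)

def consume_forever(consumer, handle):
    while True:
        msg = consumer.poll(1.0)
        if msg is None:
            continue
        if msg.error():
            LOGGER.error("Consumer error: %s", msg.error())
            break
        try:
            payload = json.loads(msg.value().decode('utf-8'))
            handle(msg.key().decode('utf-8'), payload)
        except Exception as e:
            # Bad messages are logged and skipped; the loop keeps running.
            LOGGER.warning("Unable to consume message: %s", e)
```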
diff --git a/src/analytics/backend/tests/messages.py b/src/analytics/backend/tests/messages.py
index c3b78967efe13eef9a60e19e50e56bdfca4a410d..e5faaa1f5903d97d5201b46119eff931f420696b 100644
--- a/src/analytics/backend/tests/messages.py
+++ b/src/analytics/backend/tests/messages.py
@@ -42,14 +42,14 @@ def get_threshold_dict():
 def create_analyzer():
     _create_analyzer                              = Analyzer()
     # _create_analyzer.analyzer_id.analyzer_id.uuid = str(uuid.uuid4())
-    _create_analyzer.analyzer_id.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6"
+    _create_analyzer.analyzer_id.analyzer_id.uuid = "1e22f180-ba28-4641-b190-2287bf446666"
     _create_analyzer.algorithm_name               = "Test_Aggergate_and_Threshold"
     _create_analyzer.operation_mode               = AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING
     
     _kpi_id = KpiId()
     # input IDs to analyze
     _kpi_id.kpi_id.uuid              = str(uuid.uuid4())
-    _kpi_id.kpi_id.uuid              = "6e22f180-ba28-4641-b190-2287bf448888"
+    _kpi_id.kpi_id.uuid              = "6e22f180-ba28-4641-b190-2287bf448888" 
     _create_analyzer.input_kpi_ids.append(_kpi_id)
     _kpi_id.kpi_id.uuid              = str(uuid.uuid4())
     _kpi_id.kpi_id.uuid              = "1e22f180-ba28-4641-b190-2287bf446666"
diff --git a/src/analytics/backend/tests/test_backend.py b/src/analytics/backend/tests/test_backend.py
index 9221bb23ee041da06a4c1f401c75d1906f6748b0..c2e7fbe3100e9904b1128ac5349566c3bb7c8f51 100644
--- a/src/analytics/backend/tests/test_backend.py
+++ b/src/analytics/backend/tests/test_backend.py
@@ -34,6 +34,7 @@ def test_validate_kafka_topics():
     response = KafkaTopic.create_all_topics()
     assert isinstance(response, bool)
 
+# --- To test Start Streamer functionality ---
 def test_StartSparkStreamer():
     LOGGER.debug(" >>> test_StartSparkStreamer: START <<< ")
     analyzer_obj = create_analyzer()
@@ -52,26 +53,25 @@ def test_StartSparkStreamer():
     response = AnalyticsBackendServiceObj.StartSparkStreamer(analyzer_uuid, analyzer_to_generate)
     assert isinstance(response, bool)
 
+# --- To test StartRequestListener functionality ---
 # def test_StartRequestListener():
 #     LOGGER.info('test_RunRequestListener')
 #     AnalyticsBackendServiceObj = AnalyticsBackendService()
-#     response = AnalyticsBackendServiceObj.StartRequestListener() # response is Tuple (thread, stop_event)
-#     LOGGER.debug(str(response)) 
-#     assert isinstance(response, tuple)
+#     threading.Thread(target=AnalyticsBackendServiceObj.RequestListener, args=()).start()
 
 # To test START and STOP communication together
-def test_StopRequestListener():
-    LOGGER.info('test_RunRequestListener')
-    LOGGER.info('Initiating StartRequestListener...')
-    AnalyticsBackendServiceObj = AnalyticsBackendService()
-    response_thread = AnalyticsBackendServiceObj.StartRequestListener() # response is Tuple (thread, stop_event)
-    # LOGGER.debug(str(response_thread))
-    time.sleep(10)
-    LOGGER.info('Initiating StopRequestListener...')
-    AnalyticsBackendServiceObj = AnalyticsBackendService()
-    response = AnalyticsBackendServiceObj.StopRequestListener(response_thread)
-    LOGGER.debug(str(response)) 
-    assert isinstance(response, bool)
+# def test_StopRequestListener():
+#     LOGGER.info('test_RunRequestListener')
+#     LOGGER.info('Initiating StartRequestListener...')
+#     AnalyticsBackendServiceObj = AnalyticsBackendService()
+#     response_thread = AnalyticsBackendServiceObj.StartRequestListener() # response is Tuple (thread, stop_event)
+#     # LOGGER.debug(str(response_thread))
+#     time.sleep(10)
+#     LOGGER.info('Initiating StopRequestListener...')
+#     AnalyticsBackendServiceObj = AnalyticsBackendService()
+#     response = AnalyticsBackendServiceObj.StopRequestListener(response_thread)
+#     LOGGER.debug(str(response)) 
+#     assert isinstance(response, bool)
 
 # To independently tests the SparkListener functionality
 # def test_SparkListener():
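The commented-out listener test now favours launching `RequestListener` on a bare thread over the old `(thread, stop_event)` tuple API. A sketch of that fire-and-forget pattern follows; the `daemon=True` flag is an assumption added here (not in the diff) so a test process can still exit without joining the thread.

```python
# Fire-and-forget listener start, as the commented-out test sketches.
# daemon=True is an assumption added so pytest can exit cleanly; the diff
# itself starts a non-daemon thread.
import threading

def start_listener(service) -> threading.Thread:
    thread = threading.Thread(target=service.RequestListener, daemon=True)
    thread.start()
    return thread
```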
diff --git a/src/analytics/frontend/requirements.in b/src/analytics/frontend/requirements.in
index d81b9ddbeafeff94c830d48ca5594e775b9ce240..0b1ec921b8bb77c0d26e8240585a19ef165f0eec 100644
--- a/src/analytics/frontend/requirements.in
+++ b/src/analytics/frontend/requirements.in
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-apscheduler==3.10.4
+apscheduler==3.10.1
 confluent-kafka==2.3.*
 psycopg2-binary==2.9.*
 SQLAlchemy==1.4.*
diff --git a/src/analytics/frontend/tests/messages.py b/src/analytics/frontend/tests/messages.py
index 646de962e8a213582fdb7cd1446ab57bda561a96..eb25c33b03744d627efae0a436e5bdce4553b4af 100644
--- a/src/analytics/frontend/tests/messages.py
+++ b/src/analytics/frontend/tests/messages.py
@@ -21,13 +21,14 @@ from common.proto.analytics_frontend_pb2 import ( AnalyzerOperationMode, Analyze
 def create_analyzer_id():
     _create_analyzer_id                  = AnalyzerId()
     # _create_analyzer_id.analyzer_id.uuid = str(uuid.uuid4())
-    _create_analyzer_id.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6"
+    # _create_analyzer_id.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6"
+    _create_analyzer_id.analyzer_id.uuid = "1e22f180-ba28-4641-b190-2287bf446666"
     return _create_analyzer_id
 
 def create_analyzer():
     _create_analyzer                              = Analyzer()
     # _create_analyzer.analyzer_id.analyzer_id.uuid = str(uuid.uuid4())
-    _create_analyzer.analyzer_id.analyzer_id.uuid = "efef4d95-1cf1-43c4-9742-95c283ddd7a6"
+    _create_analyzer.analyzer_id.analyzer_id.uuid = "1e22f180-ba28-4641-b190-2287bf446666"
     _create_analyzer.algorithm_name               = "Test_Aggergate_and_Threshold"
     _create_analyzer.operation_mode               = AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING
     
diff --git a/src/analytics/frontend/tests/test_frontend.py b/src/analytics/frontend/tests/test_frontend.py
index 9f5c040f366b02a6fea27e8e8696c0c118ece05a..1b4e0e14e687b454cdfdefe466dfd11e84bf245b 100644
--- a/src/analytics/frontend/tests/test_frontend.py
+++ b/src/analytics/frontend/tests/test_frontend.py
@@ -26,7 +26,6 @@ from common.Settings          import ( get_service_port_grpc, get_env_var_name,
 
 from common.tools.kafka.Variables                        import KafkaTopic
 from common.proto.kpi_value_api_pb2                      import KpiValue
-from common.proto.analytics_frontend_pb2                 import AnalyzerAlarms
 from analytics.frontend.client.AnalyticsFrontendClient   import AnalyticsFrontendClient
 from analytics.frontend.service.AnalyticsFrontendService import AnalyticsFrontendService
 from analytics.frontend.tests.messages                   import ( create_analyzer_id, create_analyzer,
@@ -34,7 +33,7 @@ from analytics.frontend.tests.messages                   import ( create_analyze
 from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl
 from apscheduler.schedulers.background                   import BackgroundScheduler
 from apscheduler.triggers.interval                       import IntervalTrigger
-
+from common.proto.analytics_frontend_pb2                 import Analyzer, AnalyzerId, AnalyzerFilter, AnalyzerList
 
 ###########################
 # Tests Setup
@@ -85,46 +84,23 @@ def analyticsFrontend_client(analyticsFrontend_service : AnalyticsFrontendServic
 ###########################
 
 # --- "test_validate_kafka_topics" should be executed before the functionality tests ---
-def test_validate_kafka_topics():
-    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
-    response = KafkaTopic.create_all_topics()
-    assert isinstance(response, bool)
-
-# # ----- core funtionality test -----
-def test_StartAnalytics(analyticsFrontend_client):
-    LOGGER.info(' >>> test_StartAnalytic START: <<< ')
-    stream = analyticsFrontend_client.StartAnalyzer(create_analyzer())
-    for response in stream:
-        LOGGER.debug(str(response))
-    assert isinstance(response, KpiValue)
-
-# To test start and stop listener together
 def test_StartStopAnalyzers(analyticsFrontend_client):
-    LOGGER.info(' >>> test_StartStopAnalyzers START: <<< ')
-    LOGGER.info('--> StartAnalyzer')
+    LOGGER.info(' >>> test_StartStopAnalyzers START: <<< ')
     added_analyzer_id = analyticsFrontend_client.StartAnalyzer(create_analyzer())
     LOGGER.debug(str(added_analyzer_id))
-    LOGGER.info(' --> Calling StartResponseListener... ')
-    class_obj = AnalyticsFrontendServiceServicerImpl()
-    response =  class_obj.StartResponseListener(added_analyzer_id.analyzer_id._uuid)
-    LOGGER.debug(response)
-    LOGGER.info("waiting for timer to comlete ...")
-    time.sleep(3)
-    LOGGER.info('--> StopAnalyzer')
-    response = analyticsFrontend_client.StopAnalyzer(added_analyzer_id)
+    assert isinstance(added_analyzer_id, AnalyzerId)
+
+def test_StopAnalytic(analyticsFrontend_client):
+    LOGGER.info(' >>> test_StopAnalytic START: <<< ')
+    response = analyticsFrontend_client.StopAnalyzer(create_analyzer_id())
     LOGGER.debug(str(response))
+    assert isinstance(response, Empty)
 
-# def test_SelectAnalytics(analyticsFrontend_client):
-#     LOGGER.info(' >>> test_SelectAnalytics START: <<< ')
-#     response = analyticsFrontend_client.SelectAnalyzers(create_analyzer_filter())
-#     LOGGER.debug(str(response))
-#     assert isinstance(response, AnalyzerList)
-
-# def test_StopAnalytic(analyticsFrontend_client):
-#     LOGGER.info(' >>> test_StopAnalytic START: <<< ')
-#     response = analyticsFrontend_client.StopAnalyzer(create_analyzer_id())
-#     LOGGER.debug(str(response))
-#     assert isinstance(response, Empty)
+def test_SelectAnalytics(analyticsFrontend_client):
+    LOGGER.info(' >>> test_SelectAnalytics START: <<< ')
+    response = analyticsFrontend_client.SelectAnalyzers(create_analyzer_filter())
+    LOGGER.debug(str(response))
+    assert isinstance(response, AnalyzerList)
 
 # def test_ResponseListener():
 #         LOGGER.info(' >>> test_ResponseListener START <<< ')
diff --git a/src/analytics/requirements.in b/src/analytics/requirements.in
index 8ff30ddaad25c39713f2e6f68c8d9aebed74dad0..231dc04e820387c95ffea72cbe67b9f0a9a0865a 100644
--- a/src/analytics/requirements.in
+++ b/src/analytics/requirements.in
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-java==11.0.*
-pyspark==3.5.2
 confluent-kafka==2.3.*
 psycopg2-binary==2.9.*
 SQLAlchemy==1.4.*
diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py
index b5cb3bbe07fee03f7e13cadb0b4d87766e80cb1c..9e432d637e70236d192d5248247175ef310d8368 100644
--- a/src/common/tools/kafka/Variables.py
+++ b/src/common/tools/kafka/Variables.py
@@ -25,11 +25,12 @@ class KafkaConfig(Enum):
 
     @staticmethod
     def get_kafka_address() -> str:
-        # kafka_server_address = get_setting('KFK_SERVER_ADDRESS', default=None)
-        # if kafka_server_address is None:
-        KFK_NAMESPACE        = get_setting('KFK_NAMESPACE')
-        KFK_PORT             = get_setting('KFK_SERVER_PORT')
-        kafka_server_address = KFK_SERVER_ADDRESS_TEMPLATE.format(KFK_NAMESPACE, KFK_PORT)
+        kafka_server_address = get_setting('KFK_SERVER_ADDRESS', default=None)
+        if kafka_server_address is None:
+            KFK_NAMESPACE        = get_setting('KFK_NAMESPACE')
+            KFK_PORT             = get_setting('KFK_SERVER_PORT')
+            kafka_server_address = KFK_SERVER_ADDRESS_TEMPLATE.format(KFK_NAMESPACE, KFK_PORT)
+        # kafka_server_address = "127.0.0.1:9092"
         return kafka_server_address
         
     @staticmethod
@@ -90,4 +91,4 @@ class KafkaTopic(Enum):
                 return False
         return True
 
-# create all topics after the deployments (Telemetry and Analytics)
+# TODO: create all topics after the deployments (Telemetry and Analytics)
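The rewrite restores the intended precedence: an explicit `KFK_SERVER_ADDRESS` wins, and only when it is unset is the address derived from the namespace template. The logic is isolated below, with `os.environ` standing in for the repo's `get_setting` helper and an illustrative template string.

```python
# Precedence implemented by the new get_kafka_address, in isolation.
# os.environ replaces the repo's get_setting helper; the template string is
# illustrative, the real one lives alongside KafkaConfig.
import os

KFK_SERVER_ADDRESS_TEMPLATE = 'kafka-service.{:s}.svc.cluster.local:{:s}'

def get_kafka_address() -> str:
    address = os.environ.get('KFK_SERVER_ADDRESS')  # explicit override wins
    if address is None:                             # otherwise derive it
        address = KFK_SERVER_ADDRESS_TEMPLATE.format(
            os.environ['KFK_NAMESPACE'], os.environ['KFK_SERVER_PORT'])
    return address
```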
diff --git a/src/kpi_manager/tests/test_messages.py b/src/kpi_manager/tests/test_messages.py
index 7b5c45859b6c10056211f9f33df950d9668c11ea..08a2dbf7334c3e4e68c3cfa6c27ce08532521342 100644
--- a/src/kpi_manager/tests/test_messages.py
+++ b/src/kpi_manager/tests/test_messages.py
@@ -27,6 +27,8 @@ def create_kpi_id_request():
 def create_kpi_descriptor_request(descriptor_name: str = "Test_name"):
     _create_kpi_request                                    = kpi_manager_pb2.KpiDescriptor()
     _create_kpi_request.kpi_id.kpi_id.uuid                 = str(uuid.uuid4())
+    # _create_kpi_request.kpi_id.kpi_id.uuid                 = "6e22f180-ba28-4641-b190-2287bf448888"
+    # _create_kpi_request.kpi_id.kpi_id.uuid                 = "1e22f180-ba28-4641-b190-2287bf446666"
     _create_kpi_request.kpi_description                    = descriptor_name
     _create_kpi_request.kpi_sample_type                    = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
     _create_kpi_request.device_id.device_uuid.uuid         = 'DEV2' 
diff --git a/src/kpi_value_api/.gitlab-ci.yml b/src/kpi_value_api/.gitlab-ci.yml
index 1a6f821ba9e798bb4220d914109ab3a65f0f1792..fef96d5b5bb6d4b33ea83419cb15051fdbf1d5ff 100644
--- a/src/kpi_value_api/.gitlab-ci.yml
+++ b/src/kpi_value_api/.gitlab-ci.yml
@@ -61,7 +61,8 @@ unit_test kpi-value-api:
       docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
       bitnami/zookeeper:latest
     - sleep 10 # Wait for Zookeeper to start
-    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+    - >
+      docker run --name kafka -d --network=teraflowbridge -p 9092:9092
       --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
       --env ALLOW_PLAINTEXT_LISTENER=yes
       bitnami/kafka:latest
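The `- >` marker matters here: without the folded block scalar, YAML parses each indented continuation line as a separate list item, so GitLab would run `--env ...` as its own (invalid) shell command. A quick demonstration of the folding, assuming PyYAML is available:

```python
# Why the `- >` fix is needed: a folded scalar joins the continuation lines
# into one shell command. Assumes PyYAML is available.
import yaml

snippet = """
script:
  - >
    docker run --name kafka -d --network=teraflowbridge -p 9092:9092
    --env ALLOW_PLAINTEXT_LISTENER=yes
    bitnami/kafka:latest
"""
parsed = yaml.safe_load(snippet)
assert len(parsed['script']) == 1          # three physical lines, one command
print(parsed['script'][0])
```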
diff --git a/src/kpi_value_api/requirements.in b/src/kpi_value_api/requirements.in
index e95d6d8bbd81abca7eb2622e1faf6af473fcdb12..0615fa833f255bf91fd72fc484e40842face7a44 100644
--- a/src/kpi_value_api/requirements.in
+++ b/src/kpi_value_api/requirements.in
@@ -15,4 +15,4 @@
 confluent-kafka==2.3.*
 requests==2.27.*
 prometheus-api-client==0.5.3
-apscheduler==3.10.4
+apscheduler==3.10.1
diff --git a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py
index 0f57f88219a74108a555cf87e9bdb98999fd5da2..0e2d49300ccf1cffed7f09435136055b8c70615e 100644
--- a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py
+++ b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py
@@ -157,7 +157,7 @@ class KpiValueApiServiceServicerImpl(KpiValueAPIServiceServicer):
                 value["window"]["end"], "%Y-%m-%dT%H:%M:%S.%fZ").timestamp()
             response.kpi_id.kpi_id.uuid  = value['kpi_id']
             for key, threshold in value.items():
-                if "THRESHOLD_" in key:
+                if key not in ['kpi_id', 'window']:
                     response.alarms[key] = threshold
 
             yield response
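The filter change inverts the selection: rather than forwarding only `THRESHOLD_`-prefixed keys, every entry except the bookkeeping keys `kpi_id` and `window` now becomes an alarm. Its effect over a hypothetical message:

```python
# Effect of the new key filter, over a hypothetical alarm message.
value = {
    'kpi_id': '6e22f180-ba28-4641-b190-2287bf448888',
    'window': {'start': '...', 'end': '...'},
    'THRESHOLD_RAISE': True,
    'avg_value': 42.0,   # forwarded by the new filter; the old one dropped it
}
alarms = {k: v for k, v in value.items() if k not in ['kpi_id', 'window']}
assert 'THRESHOLD_RAISE' in alarms and 'avg_value' in alarms
```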
diff --git a/src/kpi_value_api/tests/test_kpi_value_api.py b/src/kpi_value_api/tests/test_kpi_value_api.py
index ea6b22585a68d565c8162f1629a6a1ac4a4f6d6a..c7eb8fef1c79d0280c951d89971695d2c39ab599 100644
--- a/src/kpi_value_api/tests/test_kpi_value_api.py
+++ b/src/kpi_value_api/tests/test_kpi_value_api.py
@@ -78,6 +78,13 @@ def test_validate_kafka_topics():
     response = KafkaTopic.create_all_topics()
     assert isinstance(response, bool)
 
+# def test_GetKpiAlarms(kpi_value_api_client):
+#     LOGGER.debug(" >>> test_GetKpiAlarms")
+#     stream = kpi_value_api_client.GetKpiAlarms(create_kpi_id_request())
+#     for response in stream:
+#         LOGGER.debug(str(response))
+#     assert isinstance(response, KpiAlarms)
+
 def test_store_kpi_values(kpi_value_api_client):
     LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ")
     response = kpi_value_api_client.StoreKpiValues(create_kpi_value_list())
diff --git a/src/kpi_value_writer/.gitlab-ci.yml b/src/kpi_value_writer/.gitlab-ci.yml
index 9a2f9fd47e435b26e2e3a335bd9b95da58a0517f..4b36165d02327332766197676d76da098a857045 100644
--- a/src/kpi_value_writer/.gitlab-ci.yml
+++ b/src/kpi_value_writer/.gitlab-ci.yml
@@ -61,7 +61,8 @@ unit_test kpi-value-writer:
       docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
       bitnami/zookeeper:latest
     - sleep 10 # Wait for Zookeeper to start
-    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+    - >
+      docker run --name kafka -d --network=teraflowbridge -p 9092:9092
       --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
       --env ALLOW_PLAINTEXT_LISTENER=yes
       bitnami/kafka:latest
diff --git a/src/telemetry/.gitlab-ci.yml b/src/telemetry/.gitlab-ci.yml
index 110a6490d20558c6589550be45b6432e500ba9d6..48fd2f49384770acacbba551443c68e3e0950475 100644
--- a/src/telemetry/.gitlab-ci.yml
+++ b/src/telemetry/.gitlab-ci.yml
@@ -71,7 +71,8 @@ unit_test telemetry-backend:
       docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
       bitnami/zookeeper:latest
     - sleep 10 # Wait for Zookeeper to start
-    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+    - >
+      docker run --name kafka -d --network=teraflowbridge -p 9092:9092
       --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
       --env ALLOW_PLAINTEXT_LISTENER=yes
       bitnami/kafka:latest
diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py
index 078fa5896d5fb5033833e0e2ef2248613ef80c18..f3cf18d65eac0fda6c56a366ce8d806a4ea2562a 100755
--- a/src/telemetry/backend/service/TelemetryBackendService.py
+++ b/src/telemetry/backend/service/TelemetryBackendService.py
@@ -17,7 +17,8 @@ import time
 import random
 import logging
 import threading
-from typing import Any, Dict
+from typing   import Any, Dict
+from datetime import datetime, timezone
 # from common.proto.context_pb2 import Empty
 from confluent_kafka import Producer as KafkaProducer
 from confluent_kafka import Consumer as KafkaConsumer
@@ -53,6 +54,8 @@ class TelemetryBackendService(GenericGrpcService):
         """
         listener for requests on Kafka topic.
         """
+        LOGGER.info('Telemetry backend request listener is running ...')
+        print      ('Telemetry backend request listener is running ...')
         consumer = self.kafka_consumer
         consumer.subscribe([KafkaTopic.REQUEST.value])
         while True:
@@ -65,16 +68,19 @@ class TelemetryBackendService(GenericGrpcService):
                 else:
                     print("Consumer error: {}".format(receive_msg.error()))
                     break
-            
-            collector = json.loads(receive_msg.value().decode('utf-8'))
-            collector_id = receive_msg.key().decode('utf-8')
-            LOGGER.debug('Recevied Collector: {:} - {:}'.format(collector_id, collector))
-            print('Recevied Collector: {:} - {:}'.format(collector_id, collector))
+            try:
+                collector = json.loads(receive_msg.value().decode('utf-8'))
+                collector_id = receive_msg.key().decode('utf-8')
+                LOGGER.debug('Received Collector: {:} - {:}'.format(collector_id, collector))
+                print('Received Collector: {:} - {:}'.format(collector_id, collector))
 
-            if collector['duration'] == -1 and collector['interval'] == -1:
-                self.TerminateCollectorBackend(collector_id)
-            else:
-                self.RunInitiateCollectorBackend(collector_id, collector)
+                if collector['duration'] == -1 and collector['interval'] == -1:
+                    self.TerminateCollectorBackend(collector_id)
+                else:
+                    self.RunInitiateCollectorBackend(collector_id, collector)
+            except Exception as e:
+                LOGGER.warning("Unable to consumer message from topic: {:}. ERROR: {:}".format(KafkaTopic.REQUEST.value, e))
+                print         ("Unable to consumer message from topic: {:}. ERROR: {:}".format(KafkaTopic.REQUEST.value, e))
 
     def TerminateCollectorBackend(self, collector_id):
         if collector_id in self.running_threads:
@@ -101,7 +107,7 @@ class TelemetryBackendService(GenericGrpcService):
         print("Initiating backend for collector: ", collector_id)
         start_time = time.time()
         while not stop_event.is_set():
-            if time.time() - start_time >= collector['duration']:            # condition to terminate backend
+            if int(collector['duration']) != -1 and time.time() - start_time >= collector['duration']:            # condition to terminate backend
                 print("Execuation duration completed: Terminating backend: Collector Id: ", collector_id, " - ", time.time() - start_time)
                 self.GenerateCollectorTerminationSignal(collector_id, "-1", -1)       # Termination confirmation to frontend.
                 break
@@ -140,7 +146,7 @@ class TelemetryBackendService(GenericGrpcService):
         """
         producer = self.kafka_producer
         kpi_value : Dict = {
-            "time_stamp": str(time.time()),
+            "time_stamp": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
             "kpi_id"    : kpi_id,
             "kpi_value" : measured_kpi_value
         }
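The `time_stamp` field moves from stringified epoch seconds to an ISO-8601 UTC instant, which downstream consumers can parse deterministically. Side by side, as a quick check (outputs illustrative):

```python
# Old vs new time_stamp format for the produced KPI value.
import time
from datetime import datetime, timezone

old = str(time.time())                                           # '1724928000.123456'
new = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")  # '2024-08-29T10:40:00Z'
print(old, new)
```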
diff --git a/src/telemetry/backend/tests/messagesBackend.py b/src/telemetry/backend/tests/messages.py
similarity index 100%
rename from src/telemetry/backend/tests/messagesBackend.py
rename to src/telemetry/backend/tests/messages.py
diff --git a/src/telemetry/backend/tests/test_TelemetryBackend.py b/src/telemetry/backend/tests/test_backend.py
similarity index 91%
rename from src/telemetry/backend/tests/test_TelemetryBackend.py
rename to src/telemetry/backend/tests/test_backend.py
index 665fa825e3ee31b2e92351d9c5855f627ce40fa1..4764d7f5f10aefe211bae840f06eed9c82386bf8 100644
--- a/src/telemetry/backend/tests/test_TelemetryBackend.py
+++ b/src/telemetry/backend/tests/test_backend.py
@@ -34,5 +34,4 @@ LOGGER = logging.getLogger(__name__)
 def test_RunRequestListener():
     LOGGER.info('test_RunRequestListener')
     TelemetryBackendServiceObj = TelemetryBackendService()
-    response = threading.Thread(target=TelemetryBackendServiceObj.RequestListener).start()
-    LOGGER.debug(str(response))
+    threading.Thread(target=TelemetryBackendServiceObj.RequestListener).start()
\ No newline at end of file
diff --git a/src/telemetry/frontend/tests/Messages.py b/src/telemetry/frontend/tests/Messages.py
index a0e93e8a121b9efaac83f7169419911c8ee6e3ea..e6d8ef439f4ad4764c5a6f8b5f36ec68cbb10867 100644
--- a/src/telemetry/frontend/tests/Messages.py
+++ b/src/telemetry/frontend/tests/Messages.py
@@ -22,15 +22,18 @@ from common.proto.kpi_manager_pb2 import KpiId
 def create_collector_id():
     _collector_id                   = telemetry_frontend_pb2.CollectorId()
     # _collector_id.collector_id.uuid = str(uuid.uuid4())
-    _collector_id.collector_id.uuid = "5d45f53f-d567-429f-9427-9196ac72ff0c"
+    _collector_id.collector_id.uuid = "efef4d95-1cf1-43c4-9742-95c283dddddd"
     return _collector_id
 
 def create_collector_request():
     _create_collector_request                                = telemetry_frontend_pb2.Collector()
-    _create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4())
-    _create_collector_request.kpi_id.kpi_id.uuid             = str(uuid.uuid4())
-    _create_collector_request.duration_s                     = float(random.randint(8, 16))
-    _create_collector_request.interval_s                     = float(random.randint(2, 4))
+    # _create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4()) 
+    _create_collector_request.collector_id.collector_id.uuid = "efef4d95-1cf1-43c4-9742-95c283dddddd"
+    # _create_collector_request.kpi_id.kpi_id.uuid             = str(uuid.uuid4())
+    _create_collector_request.kpi_id.kpi_id.uuid             = "6e22f180-ba28-4641-b190-2287bf448888"
+    # _create_collector_request.duration_s                     = float(random.randint(8, 16))
+    _create_collector_request.duration_s                     = -1
+    _create_collector_request.interval_s                     = float(random.randint(3, 5))
     return _create_collector_request
 
 def create_collector_filter():
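Setting `duration_s = -1` in the test request exercises the new guard in TelemetryBackendService above: a -1 duration means the collector runs until an explicit stop instead of timing out. The guard in isolation:

```python
# The -1 sentinel: a collector with duration -1 never self-terminates on time.
import time

def should_terminate(start_time: float, duration: float) -> bool:
    return int(duration) != -1 and time.time() - start_time >= duration

assert should_terminate(time.time() - 100, 10)      # finite duration elapsed
assert not should_terminate(time.time() - 100, -1)  # -1 runs until stopped
```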
diff --git a/src/telemetry/frontend/tests/test_frontend.py b/src/telemetry/frontend/tests/test_frontend.py
index 9c3f9d3a8f545792eb2bb3a371c6c20664d24f69..c3f8091c83f56fd4a134ec092b1e22723040595d 100644
--- a/src/telemetry/frontend/tests/test_frontend.py
+++ b/src/telemetry/frontend/tests/test_frontend.py
@@ -105,47 +105,10 @@ def test_SelectCollectors(telemetryFrontend_client):
     LOGGER.debug(str(response))
     assert isinstance(response, CollectorList)
 
-# ----- Non-gRPC method tests ----- 
-def test_RunResponseListener():
-    LOGGER.info(' >>> test_RunResponseListener START: <<< ')
-    TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl()
-    response = TelemetryFrontendServiceObj.RunResponseListener()     # becasue Method "run_kafka_listener" is not define in frontend.proto
-    LOGGER.debug(str(response))
-    assert isinstance(response, bool)
-
-# ------- previous test ----------------
-
-# def test_verify_db_and_table():
-#     LOGGER.info(' >>> test_verify_database_and_tables START: <<< ')
-#     _engine = TelemetryEngine.get_engine()
-#     managementDB.create_database(_engine)
-#     managementDB.create_tables(_engine)
-
-# def test_StartCollector(telemetryFrontend_client):
-#     LOGGER.info(' >>> test_StartCollector START: <<< ')
-#     response = telemetryFrontend_client.StartCollector(create_collector_request())
-#     LOGGER.debug(str(response))
-#     assert isinstance(response, CollectorId)
-
-# def test_run_kafka_listener():
-#     LOGGER.info(' >>> test_run_kafka_listener START: <<< ')
-#     name_mapping = NameMapping()
-#     TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl(name_mapping)
-#     response = TelemetryFrontendServiceObj.run_kafka_listener()     # Method "run_kafka_listener" is not define in frontend.proto
+# # ----- Non-gRPC method tests ----- 
+# def test_RunResponseListener():
+#     LOGGER.info(' >>> test_RunResponseListener START: <<< ')
+#     TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl()
+#     response = TelemetryFrontendServiceObj.RunResponseListener()     # because method "run_kafka_listener" is not defined in frontend.proto
 #     LOGGER.debug(str(response))
 #     assert isinstance(response, bool)
-
-# def test_StopCollector(telemetryFrontend_client):
-#     LOGGER.info(' >>> test_StopCollector START: <<< ')
-#     _collector_id = telemetryFrontend_client.StartCollector(create_collector_request())
-#     time.sleep(3)   # wait for small amount before call the stopCollecter()
-#     response = telemetryFrontend_client.StopCollector(_collector_id)
-#     LOGGER.debug(str(response))
-#     assert isinstance(response, Empty)
-
-# def test_select_collectors(telemetryFrontend_client):
-#     LOGGER.info(' >>> test_select_collector requesting <<< ')
-#     response = telemetryFrontend_client.SelectCollectors(create_collector_filter())
-#     LOGGER.info('Received Rows after applying Filter: {:} '.format(response))
-#     LOGGER.debug(str(response))
-#     assert isinstance(response, CollectorList)
\ No newline at end of file
diff --git a/src/telemetry/requirements.in b/src/telemetry/requirements.in
index a0e78d2bfb7270b9664ad5ba810e2f213d887bf7..4e9981afbcb7460344f160dcecf329f44d14e792 100644
--- a/src/telemetry/requirements.in
+++ b/src/telemetry/requirements.in
@@ -12,13 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-anytree==2.8.0
 APScheduler==3.10.1
-influx-line-protocol==0.1.4
 psycopg2-binary==2.9.3
 python-dateutil==2.8.2
 python-json-logger==2.0.2
 pytz==2024.1
-questdb==1.0.1
-requests==2.27.1
-xmltodict==0.12.0
\ No newline at end of file
+requests==2.27.1
\ No newline at end of file