diff --git a/deploy/all.sh b/deploy/all.sh
index cedbb5b8bfc7ef363c1d60a9d8f2b6cef63be384..f93cd92ac5e3189b0dc8fa71d74a586e929aaecc 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -29,9 +29,12 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 # By default, only basic components are deployed
 export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui load_generator"}
 
-# Uncomment to activate Monitoring
+# Uncomment to activate Monitoring (old)
 #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
 
+# Uncomment to activate Monitoring Framework (new)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
+
 # Uncomment to activate BGP-LS Speaker
 #export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
 
diff --git a/deploy/kafka.sh b/deploy/kafka.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4a91bfc9e657d1b8a6a548b9c0a81a2f8a0b45e0
--- /dev/null
+++ b/deploy/kafka.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the namespace where Apache Kafka will be deployed.
+export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"}
+
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+TMP_FOLDER="./tmp"
+KFK_MANIFESTS_PATH="manifests/kafka"
+KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml"
+KFK_MANIFEST="02-kafka.yaml"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests"
+mkdir -p ${TMP_MANIFESTS_FOLDER}
+
+# copy zookeeper and kafka manifest files to temporary manifest location
+cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
+cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
+
+echo "Apache Kafka Namespace"
+echo ">>> Delete Apache Kafka Namespace"
+kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
+
+echo ">>> Create Apache Kafka Namespace"
+kubectl create namespace ${KFK_NAMESPACE}
+
+echo ">>> Deploying Apache Kafka Zookeeper"
+# Kafka zookeeper service should be deployed before the kafka service
+kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
+
+KFK_ZOOKEEPER_SERVICE="zookeeper-service"    # TODO: extract the service name automatically instead of hard-coding it
+KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}')
+
+# Kafka service should be deployed after the zookeeper service
+sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
+
+echo ">>> Deploying Apache Kafka Broker"
+kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
+
+echo ">>> Verifying Apache Kafka deployment"
+sleep 10
+KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods)
+if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then
+    echo -e "Deployment Error:\n$KFK_PODS_STATUS"
+else
+    echo "$KFK_PODS_STATUS"
+fi
\ No newline at end of file
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 04895f98448694c4eca3861079df09c366b0000f..f61cdb991b09b389b20fbb82f619c0f8f3b4cdc2 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 
 # If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui load_generator"}
 
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
@@ -137,10 +137,23 @@ printf "\n"
 
 echo "Create secret with CockroachDB data"
 CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+CRDB_DATABASE_CONTEXT=${CRDB_DATABASE}  # TODO: change by specific configurable environment variable
 kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
     --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
     --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
-    --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \
+    --from-literal=CRDB_DATABASE=${CRDB_DATABASE_CONTEXT} \
+    --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
+    --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
+    --from-literal=CRDB_SSLMODE=require
+printf "\n"
+
+echo "Create secret with CockroachDB data for KPI Management"
+CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt"  # TODO: change by specific configurable environment variable
+kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
+    --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
+    --from-literal=CRDB_DATABASE=${CRDB_DATABASE_KPI_MGMT} \
     --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
     --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
     --from-literal=CRDB_SSLMODE=require
diff --git a/install_requirements.sh b/install_requirements.sh
index cbd378eca81af17386100fc0ceb3757912d0ebf5..54b660a521dadc08a344d2f79f2db15271131a21 100755
--- a/install_requirements.sh
+++ b/install_requirements.sh
@@ -22,6 +22,7 @@
 ALL_COMPONENTS="context device service nbi monitoring webui interdomain slice"
 ALL_COMPONENTS="${ALL_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector"
 ALL_COMPONENTS="${ALL_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector"
+ALL_COMPONENTS="${ALL_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
 TFS_COMPONENTS=${TFS_COMPONENTS:-$ALL_COMPONENTS}
 
 # Some components require libyang built from source code
diff --git a/manifests/kafka/01-zookeeper.yaml b/manifests/kafka/01-zookeeper.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c0e87ae0c6f12ed56702220f9e15fbe90b3b9c31
--- /dev/null
+++ b/manifests/kafka/01-zookeeper.yaml
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: zookeeper-service
+  name: zookeeper-service
+  namespace: kafka
+spec:
+  type: NodePort
+  ports:
+    - name: zookeeper-port
+      port: 2181
+      nodePort: 30181
+      targetPort: 2181
+  selector:
+    app: zookeeper
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: zookeeper
+  name: zookeeper
+  namespace: kafka
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: zookeeper
+  template:
+    metadata:
+      labels:
+        app: zookeeper
+    spec:
+      containers:
+        - image: wurstmeister/zookeeper
+          imagePullPolicy: IfNotPresent
+          name: zookeeper
+          ports:
+            - containerPort: 2181
\ No newline at end of file
diff --git a/manifests/kafka/02-kafka.yaml b/manifests/kafka/02-kafka.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8e4562e6eabec34bf3b87912310479bd98022aeb
--- /dev/null
+++ b/manifests/kafka/02-kafka.yaml
@@ -0,0 +1,61 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: kafka-broker
+  name: kafka-service
+  namespace: kafka
+spec:
+  ports:
+  - port: 9092
+  selector:
+    app: kafka-broker
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: kafka-broker
+  name: kafka-broker
+  namespace: kafka
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kafka-broker
+  template:
+    metadata:
+      labels:
+        app: kafka-broker
+    spec:
+      hostname: kafka-broker
+      containers:
+      - env:
+        - name: KAFKA_BROKER_ID
+          value: "1"
+        - name: KAFKA_ZOOKEEPER_CONNECT
+          value: <ZOOKEEPER_INTERNAL_IP>:2181
+        - name: KAFKA_LISTENERS
+          value: PLAINTEXT://:9092
+        - name: KAFKA_ADVERTISED_LISTENERS
+          value: PLAINTEXT://localhost:9092
+        image: wurstmeister/kafka
+        imagePullPolicy: IfNotPresent
+        name: kafka-broker
+        ports:
+          - containerPort: 9092
\ No newline at end of file
diff --git a/manifests/kpi_managerservice.yaml b/manifests/kpi_managerservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..984d783a9de7ed3c0c02e87d82ec673dc19c9508
--- /dev/null
+++ b/manifests/kpi_managerservice.yaml
@@ -0,0 +1,99 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kpi-managerservice
+spec:
+  selector:
+    matchLabels:
+      app: kpi-managerservice
+  #replicas: 1
+  template:
+    metadata:
+      annotations:
+        config.linkerd.io/skip-outbound-ports: "4222"
+      labels:
+        app: kpi-managerservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+        - name: server
+          image: labs.etsi.org:5050/tfs/controller/kpi_manager:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 30010
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          envFrom:
+            - secretRef:
+                name: crdb-kpi-data
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30010"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30010"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kpi-managerservice
+  labels:
+    app: kpi-managerservice
+spec:
+  type: ClusterIP
+  selector:
+    app: kpi-managerservice
+  ports:
+    - name: grpc
+      protocol: TCP
+      port: 30010
+      targetPort: 30010
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: kpi-managerservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: kpi-managerservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/kpi_value_apiservice.yaml b/manifests/kpi_value_apiservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..74eb90f675794f1b451b04af55e191edff58fae5
--- /dev/null
+++ b/manifests/kpi_value_apiservice.yaml
@@ -0,0 +1,96 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kpi-value-apiservice
+spec:
+  selector:
+    matchLabels:
+      app: kpi-value-apiservice
+  #replicas: 1
+  template:
+    metadata:
+      annotations:
+        config.linkerd.io/skip-outbound-ports: "4222"
+      labels:
+        app: kpi-value-apiservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+        - name: server
+          image: labs.etsi.org:5050/tfs/controller/kpi_value_api:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 30020
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30020"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30020"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kpi-value-apiservice
+  labels:
+    app: kpi-value-apiservice
+spec:
+  type: ClusterIP
+  selector:
+    app: kpi-value-apiservice
+  ports:
+    - name: grpc
+      protocol: TCP
+      port: 30020
+      targetPort: 30020
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: kpi-value-apiservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: kpi-value-apiservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/kpi_value_writerservice.yaml b/manifests/kpi_value_writerservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a8e44ec2a571f1290e30a08d1c896a6339cbe46
--- /dev/null
+++ b/manifests/kpi_value_writerservice.yaml
@@ -0,0 +1,96 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kpi-value-writerservice
+spec:
+  selector:
+    matchLabels:
+      app: kpi-value-writerservice
+  #replicas: 1
+  template:
+    metadata:
+      annotations:
+        config.linkerd.io/skip-outbound-ports: "4222"
+      labels:
+        app: kpi-value-writerservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+        - name: server
+          image: labs.etsi.org:5050/tfs/controller/kpi_value_writer:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 30030
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30030"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30030"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kpi-value-writerservice
+  labels:
+    app: kpi-value-writerservice
+spec:
+  type: ClusterIP
+  selector:
+    app: kpi-value-writerservice
+  ports:
+    - name: grpc
+      protocol: TCP
+      port: 30030
+      targetPort: 30030
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: kpi-value-writerservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: kpi-value-writerservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/my_deploy.sh b/my_deploy.sh
index 6007a7ff971231c6c135dfad7b9385187f028421..b89df7481ebd17edf2b966eb818598d1a04a596f 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -22,9 +22,12 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
 export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
 
-# Uncomment to activate Monitoring
+# Uncomment to activate Monitoring (old)
 #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
 
+# Uncomment to activate Monitoring Framework (new)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
+
 # Uncomment to activate BGP-LS Speaker
 #export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
 
@@ -69,7 +72,7 @@ export TFS_K8S_NAMESPACE="tfs"
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 
 # Uncomment to monitor performance of components
-export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
 
 # Uncomment when deploying Optical CyberSecurity
 #export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
@@ -171,3 +174,10 @@ export PROM_EXT_PORT_HTTP="9090"
 
 # Set the external port Grafana HTTP Dashboards will be exposed to.
 export GRAF_EXT_PORT_HTTP="3000"
+
+
+# ----- Apache Kafka -----------------------------------------------------------
+
+# Set the namespace where Apache Kafka will be deployed.
+export KFK_NAMESPACE="kafka"
+
diff --git a/proto/analytics_frontend.proto b/proto/analytics_frontend.proto
new file mode 100644
index 0000000000000000000000000000000000000000..096c1ee035ae663359d9f4df1e071d3997a0d351
--- /dev/null
+++ b/proto/analytics_frontend.proto
@@ -0,0 +1,69 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package analytics_frontend;
+
+import "context.proto";
+import "kpi_manager.proto";
+//import "kpi_sample_types.proto";
+
+service AnalyticsFrontendService {
+  rpc StartAnalyzer  (Analyzer      ) returns (AnalyzerId   ) {}
+  rpc StopAnalyzer   (AnalyzerId    ) returns (context.Empty) {}
+  rpc SelectAnalyzers(AnalyzerFilter) returns (AnalyzerList ) {}
+}
+
+message AnalyzerId {
+  context.Uuid analyzer_id = 1;
+}
+
+enum AnalyzerOperationMode {
+  ANALYZEROPERATIONMODE_BATCH     = 0;
+  ANALYZEROPERATIONMODE_STREAMING = 1;
+}
+
+message Analyzer {
+  string                     algorithm_name       = 1; // The algorithm to be executed
+  repeated kpi_manager.KpiId input_kpi_ids        = 2; // The KPI Ids to be processed by the analyzer
+  repeated kpi_manager.KpiId output_kpi_ids       = 3; // The KPI Ids produced by the analyzer
+  AnalyzerOperationMode      operation_mode       = 4; // Operation mode of the analyzer
+
+  // In batch mode...
+  float                      batch_min_duration_s = 5; // ..., min duration to collect before executing batch
+  float                      batch_max_duration_s = 6; // ..., max duration collected to execute the batch
+  uint64                     batch_min_size       = 7; // ..., min number of samples to collect before executing batch
+  uint64                     batch_max_size       = 8; // ..., max number of samples collected to execute the batch
+}
+
+message AnalyzerFilter {
+  // Analyzer that fulfill the filter are those that match ALL the following fields.
+  // An empty list means: any value is accepted.
+  // All fields empty means: list all Analyzers
+  repeated AnalyzerId                     analyzer_id     = 1;
+  repeated string                         algorithm_names = 2;
+  repeated kpi_manager.KpiId              input_kpi_ids   = 3;
+  repeated kpi_manager.KpiId              output_kpi_ids  = 4;
+  //repeated kpi_sample_types.KpiSampleType kpi_sample_type = 5; // Not implemented
+  //repeated context.DeviceId               device_id       = 6; // Not implemented
+  //repeated context.EndPointId             endpoint_id     = 7; // Not implemented
+  //repeated context.ServiceId              service_id      = 8; // Not implemented
+  //repeated context.SliceId                slice_id        = 9; // Not implemented
+  //repeated context.ConnectionId           connection_id   = 10; // Not implemented
+  //repeated context.LinkId                 link_id         = 11; // Not implemented
+}
+
+message AnalyzerList {
+  repeated Analyzer analyzer_list = 1;
+}
diff --git a/proto/device.proto b/proto/device.proto
index 3d7ba14bb75e226c51d8d2462fca76a1cab86554..a1882f33f8e177502c456672a0517928f0259ef5 100644
--- a/proto/device.proto
+++ b/proto/device.proto
@@ -16,7 +16,7 @@ syntax = "proto3";
 package device;
 
 import "context.proto";
-import "monitoring.proto";
+import "monitoring.proto"; // to be migrated to: "kpi_manager.proto"
 
 service DeviceService {
   rpc AddDevice       (context.Device    ) returns (context.DeviceId    ) {}
@@ -27,8 +27,8 @@ service DeviceService {
 }
 
 message MonitoringSettings {
-  monitoring.KpiId kpi_id = 1;
-  monitoring.KpiDescriptor kpi_descriptor = 2;
-  float sampling_duration_s = 3;
-  float sampling_interval_s = 4;
+  monitoring.KpiId         kpi_id              = 1; // to be migrated to: "kpi_manager.KpiId"
+  monitoring.KpiDescriptor kpi_descriptor      = 2; // to be migrated to: "kpi_manager.KpiDescriptor"
+  float                    sampling_duration_s = 3;
+  float                    sampling_interval_s = 4;
 }
diff --git a/proto/kpi_manager.proto b/proto/kpi_manager.proto
new file mode 100644
index 0000000000000000000000000000000000000000..2640b58c60f004e51c8aeacc0ed76963f0436956
--- /dev/null
+++ b/proto/kpi_manager.proto
@@ -0,0 +1,60 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package kpi_manager;
+
+import "context.proto";
+import "kpi_sample_types.proto";
+
+service KpiManagerService {
+  rpc SetKpiDescriptor    (KpiDescriptor      ) returns (KpiId               ) {}
+  rpc DeleteKpiDescriptor (KpiId              ) returns (context.Empty       ) {}
+  rpc GetKpiDescriptor    (KpiId              ) returns (KpiDescriptor       ) {}
+  rpc SelectKpiDescriptor (KpiDescriptorFilter) returns (KpiDescriptorList   ) {}
+}
+
+message KpiId {
+  context.Uuid kpi_id = 1;
+}
+
+message KpiDescriptor {
+  KpiId                          kpi_id          = 1;
+  string                         kpi_description = 2;
+  kpi_sample_types.KpiSampleType kpi_sample_type = 3;
+  context.DeviceId               device_id       = 4;
+  context.EndPointId             endpoint_id     = 5;
+  context.ServiceId              service_id      = 6;
+  context.SliceId                slice_id        = 7;
+  context.ConnectionId           connection_id   = 8;
+  context.LinkId                 link_id         = 9;
+}
+
+message KpiDescriptorFilter {
+  // KPI Descriptors that fulfill the filter are those that match ALL the following fields.
+  // An empty list means: any value is accepted.
+  // All fields empty means: list all KPI Descriptors
+  repeated KpiId                          kpi_id          = 1;
+  repeated kpi_sample_types.KpiSampleType kpi_sample_type = 2;
+  repeated context.DeviceId               device_id       = 3;
+  repeated context.EndPointId             endpoint_id     = 4;
+  repeated context.ServiceId              service_id      = 5;
+  repeated context.SliceId                slice_id        = 6;
+  repeated context.ConnectionId           connection_id   = 7;
+  repeated context.LinkId                 link_id         = 8;
+}
+
+message KpiDescriptorList {
+  repeated KpiDescriptor kpi_descriptor_list = 1;
+}
diff --git a/proto/kpi_value_api.proto b/proto/kpi_value_api.proto
new file mode 100644
index 0000000000000000000000000000000000000000..dff96272e3d05756dd19a49ecaede7311b196540
--- /dev/null
+++ b/proto/kpi_value_api.proto
@@ -0,0 +1,52 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package kpi_value_api;
+
+import "context.proto";
+import "kpi_manager.proto";
+
+service KpiValueAPIService {
+	rpc StoreKpiValues  (KpiValueList)   returns (context.Empty) {}
+	rpc SelectKpiValues (KpiValueFilter) returns (KpiValueList)  {}
+}
+
+message KpiValue {
+	kpi_manager.KpiId kpi_id         = 1;
+	context.Timestamp timestamp      = 2;
+	KpiValueType      kpi_value_type = 3;
+}
+
+message KpiValueList {
+	repeated KpiValue kpi_value_list = 1;
+}
+
+message KpiValueType {
+  oneof value {
+    int32  int32Val  = 1;
+    uint32 uint32Val = 2;
+    int64  int64Val  = 3;
+    uint64 uint64Val = 4;
+    float  floatVal  = 5;
+    string stringVal = 6;
+    bool   boolVal   = 7;
+  }
+}
+
+message KpiValueFilter {
+	repeated kpi_manager.KpiId kpi_id          = 1;
+	repeated context.Timestamp start_timestamp = 2;
+	repeated context.Timestamp end_timestamp   = 3;
+}
diff --git a/proto/monitoring.proto b/proto/monitoring.proto
old mode 100644
new mode 100755
index 2c1c2f8ad58192586c17e310e33bccebbe775ee8..083bd82854547478d3a8f4a8935fdf75e9070d9d
--- a/proto/monitoring.proto
+++ b/proto/monitoring.proto
@@ -145,12 +145,12 @@ message SubsList {
 }
 
 message AlarmDescriptor {
-  AlarmID                     alarm_id              = 1;
-  string                      alarm_description     = 2;
-  string                      name                  = 3;
-  KpiId                       kpi_id                = 4;
-  KpiValueRange               kpi_value_range       = 5;
-  context.Timestamp           timestamp             = 6;
+  AlarmID           alarm_id          = 1;
+  string            alarm_description = 2;
+  string            name              = 3;
+  KpiId             kpi_id            = 4;
+  KpiValueRange     kpi_value_range   = 5;
+  context.Timestamp timestamp         = 6;
 }
 
 message AlarmID{
@@ -170,5 +170,5 @@ message AlarmResponse {
 }
 
 message AlarmList {
-    repeated AlarmDescriptor alarm_descriptor = 1;
+  repeated AlarmDescriptor alarm_descriptor = 1;
 }
diff --git a/proto/optical_attack_detector.proto b/proto/optical_attack_detector.proto
index 783e23b35d754db983c75c56dadc203996beadd4..f74eea68b8c5a588f5ecc06a59916058cb8d9695 100644
--- a/proto/optical_attack_detector.proto
+++ b/proto/optical_attack_detector.proto
@@ -12,12 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// protocol buffers documentation: https://developers.google.com/protocol-buffers/docs/proto3
 syntax = "proto3";
 package optical_attack_detector;
 
 import "context.proto";
-import "monitoring.proto";
+import "monitoring.proto"; // to be migrated to: "kpi_manager.proto"
 
 service OpticalAttackDetectorService {
   
@@ -28,5 +27,5 @@ service OpticalAttackDetectorService {
 
 message DetectionRequest {
   context.ServiceId service_id = 1;
-  monitoring.KpiId  kpi_id     = 2;
+  monitoring.KpiId  kpi_id     = 2; // to be migrated to: "kpi_manager.KpiId"
 }
diff --git a/proto/policy_condition.proto b/proto/policy_condition.proto
index add3ec1ab127674e171c366ffa49346892b3ff0d..612dcb1af8eb8adb0db65b8ae47301c87ad6b9ef 100644
--- a/proto/policy_condition.proto
+++ b/proto/policy_condition.proto
@@ -15,13 +15,13 @@
 syntax = "proto3";
 package policy;
 
-import "monitoring.proto";
+import "monitoring.proto"; // to be migrated to: "kpi_manager.proto"
 
 // Condition
 message PolicyRuleCondition {
-  monitoring.KpiId kpiId = 1;
-  NumericalOperator numericalOperator = 2;
-  monitoring.KpiValue kpiValue = 3;
+  monitoring.KpiId    kpiId             = 1;  // to be migrated to: "kpi_manager.KpiId"
+  NumericalOperator   numericalOperator = 2;
+  monitoring.KpiValue kpiValue          = 3;
 }
 
 // Operator to be used when comparing Kpis with condition values
diff --git a/proto/telemetry_frontend.proto b/proto/telemetry_frontend.proto
new file mode 100644
index 0000000000000000000000000000000000000000..dbc1e8bf688f9f2df341484c1929e2338c458bbf
--- /dev/null
+++ b/proto/telemetry_frontend.proto
@@ -0,0 +1,48 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package telemetry_frontend;
+
+import "context.proto";
+import "kpi_manager.proto";
+
+service TelemetryFrontendService {
+  rpc StartCollector  (Collector      ) returns (CollectorId  ) {}
+  rpc StopCollector   (CollectorId    ) returns (context.Empty) {}
+  rpc SelectCollectors(CollectorFilter) returns (CollectorList) {}
+}
+
+message CollectorId {
+  context.Uuid collector_id = 1;
+}
+
+message Collector {
+  CollectorId       collector_id = 1; // The Collector ID
+  kpi_manager.KpiId kpi_id       = 2; // The KPI Id to be associated to the collected samples
+  float             duration_s   = 3; // Terminate data collection after duration[seconds]; duration==0 means indefinitely
+  float             interval_s   = 4; // Interval between collected samples
+}
+
+message CollectorFilter {
+  // Collectors that fulfill the filter are those that match ALL the following fields.
+  // An empty list means: any value is accepted.
+  // All fields empty means: list all Collectors
+  repeated CollectorId       collector_id = 1;
+  repeated kpi_manager.KpiId kpi_id       = 2;
+}
+
+message CollectorList {
+  repeated Collector collector_list = 1;
+}
diff --git a/scripts/run_tests_locally-kpi-DB.sh b/scripts/run_tests_locally-kpi-DB.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d43be66e1e9843f85e34442d00b10a11d2903c43
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-DB.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+# python3 kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    kpi_manager/tests/test_kpi_db.py
diff --git a/scripts/run_tests_locally-kpi-manager.sh b/scripts/run_tests_locally-kpi-manager.sh
new file mode 100755
index 0000000000000000000000000000000000000000..db6e786835bcd4550b53394aef23aaa670d43b08
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-manager.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+# python3 kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    kpi_manager/tests/test_kpi_manager.py
diff --git a/scripts/run_tests_locally-kpi-prom-writer.sh b/scripts/run_tests_locally-kpi-prom-writer.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1179cbf866969607b880eb41be5c25ab95007bfc
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-prom-writer.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    kpi_value_writer/tests/test_metric_writer_to_prom.py
diff --git a/scripts/run_tests_locally-kpi-value-API.sh b/scripts/run_tests_locally-kpi-value-API.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8dfbfb16237634519dcae2fcc34f850a5188c1e7
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-value-API.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG -o log_cli=true --verbose \
+    kpi_value_api/tests/test_kpi_value_api.py
diff --git a/scripts/run_tests_locally-kpi-value-writer.sh b/scripts/run_tests_locally-kpi-value-writer.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8faaeb6d895a240278d7ceb0c5c0b2855fa25910
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-value-writer.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    kpi_value_writer/tests/test_kpi_value_writer.py
diff --git a/scripts/run_tests_locally-telemetry-DB.sh b/scripts/run_tests_locally-telemetry-DB.sh
new file mode 100755
index 0000000000000000000000000000000000000000..bb1c48b76440c00b398875a8f704c2a82ba4ab50
--- /dev/null
+++ b/scripts/run_tests_locally-telemetry-DB.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-cli-level=INFO --verbose \
+    telemetry/database/tests/telemetryDBtests.py
diff --git a/scripts/run_tests_locally-telemetry-backend.sh b/scripts/run_tests_locally-telemetry-backend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9cf404ffcef6c99b261f81eb0c6b910dd60845e5
--- /dev/null
+++ b/scripts/run_tests_locally-telemetry-backend.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+# python3 kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \
+    telemetry/backend/tests/testTelemetryBackend.py
diff --git a/scripts/run_tests_locally-telemetry-frontend.sh b/scripts/run_tests_locally-telemetry-frontend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7652ccb583268285dcd2fcf3090b717dc18e4fc3
--- /dev/null
+++ b/scripts/run_tests_locally-telemetry-frontend.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+# python3 kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \
+    telemetry/frontend/tests/test_frontend.py
diff --git a/scripts/run_tests_locally-telemetry-mgtDB.sh b/scripts/run_tests_locally-telemetry-mgtDB.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8b68104eaf343b57ec4953334cda37167cca3529
--- /dev/null
+++ b/scripts/run_tests_locally-telemetry-mgtDB.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-cli-level=INFO --verbose \
+    telemetry/database/tests/managementDBtests.py
diff --git a/scripts/show_logs_kpi_manager.sh b/scripts/show_logs_kpi_manager.sh
new file mode 100755
index 0000000000000000000000000000000000000000..86f084f69f6babf5a90957f432b214e35a08c461
--- /dev/null
+++ b/scripts/show_logs_kpi_manager.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/kpi-managerservice -c server
diff --git a/scripts/show_logs_kpi_value_api.sh b/scripts/show_logs_kpi_value_api.sh
new file mode 100755
index 0000000000000000000000000000000000000000..041ad7f1ffb1a218af00d5d142024a5063d109c3
--- /dev/null
+++ b/scripts/show_logs_kpi_value_api.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/kpi-value-apiservice -c server
diff --git a/scripts/show_logs_kpi_value_writer.sh b/scripts/show_logs_kpi_value_writer.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d62f3ea0a1a6961be4a5b6f4841c9ba4e1a89316
--- /dev/null
+++ b/scripts/show_logs_kpi_value_writer.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/kpi-value-writerservice -c server
diff --git a/scripts/show_logs_telemetry-DB.sh b/scripts/show_logs_telemetry-DB.sh
new file mode 100755
index 0000000000000000000000000000000000000000..84fc875d01e18eae9b144edaf220d5cb74017ea4
--- /dev/null
+++ b/scripts/show_logs_telemetry-DB.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"crdb"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs cockroachdb-0
diff --git a/src/common/Constants.py b/src/common/Constants.py
index a6a6899b0342c8e3efcde8e2e26897c1e4734cd3..767b21343f89e35c2338b522bcdc71c56aca1815 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -61,6 +61,10 @@ class ServiceNameEnum(Enum):
     E2EORCHESTRATOR        = 'e2e-orchestrator'
     OPTICALCONTROLLER      = 'opticalcontroller'
     BGPLS                  = 'bgpls-speaker'
+    KPIMANAGER             = 'kpi-manager'
+    KPIVALUEAPI            = 'kpi-value-api'
+    KPIVALUEWRITER         = 'kpi-value-writer'
+    TELEMETRYFRONTEND      = 'telemetry-frontend'
 
     # Used for test and debugging only
     DLT_GATEWAY    = 'dltgateway'
@@ -90,6 +94,10 @@ DEFAULT_SERVICE_GRPC_PORTS = {
     ServiceNameEnum.E2EORCHESTRATOR        .value : 10050,
     ServiceNameEnum.OPTICALCONTROLLER      .value : 10060,
     ServiceNameEnum.BGPLS                  .value : 20030,
+    ServiceNameEnum.KPIMANAGER             .value : 30010,
+    ServiceNameEnum.KPIVALUEAPI            .value : 30020,
+    ServiceNameEnum.KPIVALUEWRITER         .value : 30030,
+    ServiceNameEnum.TELEMETRYFRONTEND      .value : 30050,
 
     # Used for test and debugging only
     ServiceNameEnum.DLT_GATEWAY   .value : 50051,
diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py
new file mode 100644
index 0000000000000000000000000000000000000000..24ae2cff7b5e710e18999eb09029216a4a5d6c8a
--- /dev/null
+++ b/src/common/tools/kafka/Variables.py
@@ -0,0 +1,74 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from enum import Enum
+from confluent_kafka import KafkaException
+from confluent_kafka.admin import AdminClient, NewTopic
+
+
+LOGGER = logging.getLogger(__name__)
+
+class KafkaConfig(Enum):
+    # SERVER_IP    = "127.0.0.1:9092"
+    SERVER_IP    = "kafka-service.kafka.svc.cluster.local:9092"
+    ADMIN_CLIENT =  AdminClient({'bootstrap.servers': SERVER_IP})
+
+class KafkaTopic(Enum):
+    REQUEST  = 'topic_request' 
+    RESPONSE = 'topic_response'
+    RAW      = 'topic_raw' 
+    LABELED  = 'topic_labeled'
+    VALUE    = 'topic_value'
+
+    @staticmethod
+    def create_all_topics() -> bool:
+        """
+            Method to create Kafka topics defined as class members
+        """
+        all_topics = [member.value for member in KafkaTopic]
+        if( KafkaTopic.create_new_topic_if_not_exists( all_topics )):
+            LOGGER.debug("All topics are created successfully")
+            return True
+        else:
+            LOGGER.debug("Error creating all topics")
+            return False
+    
+    @staticmethod
+    def create_new_topic_if_not_exists(new_topics: list) -> bool:
+        """
+        Method to create Kafka topic if it does not exist.
+        Args:
+            new_topics: list of topic name(s) to be created on Kafka
+        """
+        LOGGER.debug("Topics names to be verified and created: {:}".format(new_topics))
+        for topic in new_topics:
+            try:
+                topic_metadata = KafkaConfig.ADMIN_CLIENT.value.list_topics(timeout=5)
+                # LOGGER.debug("Existing topic list: {:}".format(topic_metadata.topics))
+                if topic not in topic_metadata.topics:
+                    # If the topic does not exist, create a new topic
+                    print("Topic {:} does not exist. Creating...".format(topic))
+                    LOGGER.debug("Topic {:} does not exist. Creating...".format(topic))
+                    new_topic = NewTopic(topic, num_partitions=1, replication_factor=1)
+                    KafkaConfig.ADMIN_CLIENT.value.create_topics([new_topic])
+                else:
+                    print("Topic name already exists: {:}".format(topic))
+                    LOGGER.debug("Topic name already exists: {:}".format(topic))
+            except Exception as e:
+                LOGGER.debug("Failed to create topic: {:}".format(e))
+                return False
+        return True
+
+# create all topics after the deployments (Telemetry and Analytics)
diff --git a/src/kpi_manager/.gitlab-ci.yml b/src/kpi_manager/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6aef328ea51e835eb06b286478f26d83f9a80f13
--- /dev/null
+++ b/src/kpi_manager/.gitlab-ci.yml
@@ -0,0 +1,133 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build kpi-manager:
+  variables:
+    IMAGE_NAME: 'kpi-manager' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
+    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit_test kpi-manager:
+  variables:
+    IMAGE_NAME: 'kpi-manager' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build kpi-manager
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
+    - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
+    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+    - docker container prune -f
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker pull "cockroachdb/cockroach:latest-v22.2"
+    - docker volume create crdb
+    - >
+      docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080
+      --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123
+      --volume "crdb:/cockroach/cockroach-data"
+      cockroachdb/cockroach:latest-v22.2 start-single-node
+    - echo "Waiting for initialization..."
+    - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
+    - docker logs crdb
+    - docker ps -a
+    - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $CRDB_ADDRESS
+    - NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $NATS_ADDRESS
+    - >
+      docker run --name $IMAGE_NAME -d -p 30010:30010
+      --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
+      --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - docker ps -a
+    - sleep 5
+    - docker logs $IMAGE_NAME
+    - >
+      docker exec -i $IMAGE_NAME bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py"
+    - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker volume rm -f crdb
+    - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - src/$IMAGE_NAME/tests/Dockerfile
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
+
+## Deployment of the service in Kubernetes Cluster
+#deploy context:
+#  variables:
+#    IMAGE_NAME: 'context' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit test context
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual    
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/kpi_manager/Dockerfile b/src/kpi_manager/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..a57957759a32b45b715e327b54ebe004a6edf265
--- /dev/null
+++ b/src/kpi_manager/Dockerfile
@@ -0,0 +1,68 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/kpi_manager
+WORKDIR /var/teraflow/kpi_manager
+COPY src/kpi_manager/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/kpi_manager/. kpi_manager/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "kpi_manager.service"]
diff --git a/src/kpi_manager/README.md b/src/kpi_manager/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c1feadcc4843db26a219d1e3b37833ddd80b18dc
--- /dev/null
+++ b/src/kpi_manager/README.md
@@ -0,0 +1,29 @@
+# How to locally run and test KPI manager micro-service
+
+## --- File links need to be updated. ---
+### Pre-requisites
+The following requirements should be fulfilled before the execution of the KPI management service.
+
+1. Verify that the [kpi_management.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/kpi_management.proto) file exists and the gRPC stub files are generated successfully.
+2. A virtual environment exists with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/requirements.in) installed successfully.
+3. Verify the creation of the required database and table.
+[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/database/tests/KpiDBtests.py) python file lists the functions to create the database and tables, and
+[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/service/database/KpiEngine.py) contains the DB string; update the string as per your deployment.
+
+### Messages format templates
+["Messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_messages.py) python file lists the basic gRPC message formats used during the testing.
+
+### Test file
+["KPI management test"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_kpi_manager.py) python file lists the different tests conducted during the experiment.
+
+### Flow of execution (KPI Manager Service functions)
+1. Call the `create_database()` and `create_tables()` functions from `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables` to verify the existence of KPI table.
+
+2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor in `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
+
+3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from DB.
+
+4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that match the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types.
+
+## For KPI composer and KPI writer
+The functionalities of the KPI composer and writer are heavily dependent upon the Telemetry service. Therefore, these services have other pre-requisites that are mentioned [here](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/requirements.in).
diff --git a/src/kpi_manager/__init__.py b/src/kpi_manager/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_manager/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_manager/client/KpiManagerClient.py b/src/kpi_manager/client/KpiManagerClient.py
new file mode 100755
index 0000000000000000000000000000000000000000..672d82f2d78ea8b477429c5ba03fbb4331bae7c7
--- /dev/null
+++ b/src/kpi_manager/client/KpiManagerClient.py
@@ -0,0 +1,77 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+
+from common.proto.context_pb2 import Empty
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.proto.kpi_manager_pb2_grpc import KpiManagerServiceStub
+from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 10
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class KpiManagerClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.KPIMANAGER) 
+        if not port: port = get_service_port_grpc(ServiceNameEnum.KPIMANAGER) 
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
+
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = KpiManagerServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def SetKpiDescriptor(self, request : KpiDescriptor) -> KpiId:
+        LOGGER.debug('SetKpiDescriptor: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SetKpiDescriptor(request)
+        LOGGER.debug('SetKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def DeleteKpiDescriptor(self,request : KpiId) -> Empty:
+        LOGGER.debug('DeleteKpiDescriptor: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.DeleteKpiDescriptor(request)
+        LOGGER.debug('DeleteKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def GetKpiDescriptor(self, request : KpiId) -> KpiDescriptor:
+        LOGGER.debug('GetKpiDescriptor: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetKpiDescriptor(request)
+        LOGGER.debug('GetKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def SelectKpiDescriptor(self, filter : KpiDescriptorFilter) -> KpiDescriptorList:
+        LOGGER.debug('SelectKpiDescriptor: {:s}'.format(grpc_message_to_json_string(filter)))
+        response = self.stub.SelectKpiDescriptor(filter)
+        LOGGER.debug('SelectKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
diff --git a/src/kpi_manager/client/__init__.py b/src/kpi_manager/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..48f7d354a2f3fe6e91bb79b3ca956f68c36ed9e3
--- /dev/null
+++ b/src/kpi_manager/client/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+# 
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_manager/database/KpiEngine.py b/src/kpi_manager/database/KpiEngine.py
new file mode 100644
index 0000000000000000000000000000000000000000..42bda9527dfcb1e5cef0e229d260fd67f4b4a6d5
--- /dev/null
+++ b/src/kpi_manager/database/KpiEngine.py
@@ -0,0 +1,44 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, sqlalchemy
+from common.Settings import get_setting
+
+LOGGER = logging.getLogger(__name__)
+
+# CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}'
+CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
+
+class KpiEngine:
+    @staticmethod
+    def get_engine() -> sqlalchemy.engine.Engine:
+        # Prefer an explicitly configured CRDB_URI; otherwise build the URI from
+        # the individual CRDB_* settings. (Fix: the URI was previously rebuilt
+        # unconditionally, raising NameError whenever CRDB_URI was already set.)
+        crdb_uri = get_setting('CRDB_URI', default=None)
+        if crdb_uri is None:
+            CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE')
+            CRDB_SQL_PORT  = get_setting('CRDB_SQL_PORT')
+            CRDB_DATABASE  = get_setting('CRDB_DATABASE')
+            CRDB_USERNAME  = get_setting('CRDB_USERNAME')
+            CRDB_PASSWORD  = get_setting('CRDB_PASSWORD')
+            CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
+            crdb_uri = CRDB_URI_TEMPLATE.format(
+                CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+        try:
+            engine = sqlalchemy.create_engine(crdb_uri, echo=False)
+            LOGGER.info(' KpiDBmanager initalized with DB URL: {:}'.format(crdb_uri))
+        except: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
+            return None # type: ignore
+        return engine
diff --git a/src/kpi_manager/database/KpiModel.py b/src/kpi_manager/database/KpiModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c2fdff0664883bcc727096ddeda562fdbe3085d
--- /dev/null
+++ b/src/kpi_manager/database/KpiModel.py
@@ -0,0 +1,84 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy import Column, Integer, String, Text
+from sqlalchemy.orm import registry
+from common.proto.kpi_manager_pb2 import KpiDescriptor
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+# Create a base class for declarative models
+Base = registry().generate_base()
+
+class Kpi(Base):
+    __tablename__ = 'kpi'
+
+    kpi_id          = Column(UUID(as_uuid=False), primary_key=True)
+    kpi_description = Column(Text               , nullable=False)
+    kpi_sample_type = Column(Integer            , nullable=False)
+    device_id       = Column(String             , nullable=False)
+    endpoint_id     = Column(String             , nullable=False)
+    service_id      = Column(String             , nullable=False)
+    slice_id        = Column(String             , nullable=False)
+    connection_id   = Column(String             , nullable=False)
+    link_id         = Column(String             , nullable=False)
+
+    # helps in logging the information
+    def __repr__(self):
+        return (f"<Kpi(kpi_id='{self.kpi_id}', kpi_description='{self.kpi_description}', "
+                f"kpi_sample_type='{self.kpi_sample_type}', device_id='{self.device_id}', "
+                f"endpoint_id='{self.endpoint_id}', service_id='{self.service_id}', "
+                f"slice_id='{self.slice_id}', connection_id='{self.connection_id}', "
+                f"link_id='{self.link_id}')>")
+
+    @classmethod
+    def convert_KpiDescriptor_to_row(cls, request):
+        """
+        Create an instance of Kpi from a request object.
+        Args:    request: The request object containing the data.
+        Returns: An instance of Kpi initialized with data from the request.
+        """
+        return cls(
+            kpi_id          = request.kpi_id.kpi_id.uuid,
+            kpi_description = request.kpi_description,
+            kpi_sample_type = request.kpi_sample_type,
+            device_id       = request.device_id.device_uuid.uuid,
+            endpoint_id     = request.endpoint_id.endpoint_uuid.uuid,
+            service_id      = request.service_id.service_uuid.uuid,
+            slice_id        = request.slice_id.slice_uuid.uuid,
+            connection_id   = request.connection_id.connection_uuid.uuid,
+            link_id         = request.link_id.link_uuid.uuid
+        )
+    
+    @classmethod
+    def convert_row_to_KpiDescriptor(cls, row):
+        """
+        Create and return a dictionary representation of a Kpi instance.       
+        Args:   row: The Kpi instance (row) containing the data.
+        Returns: KpiDescriptor object
+        """
+        response = KpiDescriptor()
+        response.kpi_id.kpi_id.uuid                 = row.kpi_id
+        response.kpi_description                    = row.kpi_description
+        response.kpi_sample_type                    = row.kpi_sample_type
+        response.service_id.service_uuid.uuid       = row.service_id
+        response.device_id.device_uuid.uuid         = row.device_id
+        response.slice_id.slice_uuid.uuid           = row.slice_id
+        response.endpoint_id.endpoint_uuid.uuid     = row.endpoint_id
+        response.connection_id.connection_uuid.uuid = row.connection_id
+        response.link_id.link_uuid.uuid             = row.link_id
+        return response
diff --git a/src/kpi_manager/database/Kpi_DB.py b/src/kpi_manager/database/Kpi_DB.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b2b586b6d72cf73ff3362f9240d1171e8a9974d
--- /dev/null
+++ b/src/kpi_manager/database/Kpi_DB.py
@@ -0,0 +1,154 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sqlalchemy_utils
+from sqlalchemy.orm import sessionmaker
+from kpi_manager.database.KpiEngine import KpiEngine
+from kpi_manager.database.KpiModel import Kpi as KpiModel
+from common.method_wrappers.ServiceExceptions import ( 
+    AlreadyExistsException, OperationFailedException)
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "kpi"
+
+class KpiDB:
+    def __init__(self):
+        # Fix: 'return False' in __init__ is illegal (Python raises a confusing
+        # "TypeError: __init__() should return None"); fail loudly instead.
+        self.db_engine = KpiEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            raise Exception('Unable to get SQLAlchemy DB Engine')
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
+    def create_database(self) -> None:
+        if not sqlalchemy_utils.database_exists(self.db_engine.url):
+            LOGGER.debug("Database created. {:}".format(self.db_engine.url))
+            sqlalchemy_utils.create_database(self.db_engine.url)
+
+    def drop_database(self) -> None:
+        if sqlalchemy_utils.database_exists(self.db_engine.url):
+            sqlalchemy_utils.drop_database(self.db_engine.url)
+
+    def create_tables(self):
+        try:
+            KpiModel.metadata.create_all(self.db_engine)     # type: ignore
+            LOGGER.debug("Tables created in the DB Name: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.debug("Tables cannot be created in the kpi database. {:s}".format(str(e)))
+            raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)])
+
+    def verify_tables(self):
+        try:
+            with self.db_engine.connect() as connection:
+                result = connection.execute("SHOW TABLES;")
+                tables = result.fetchall()      # type: ignore
+                LOGGER.debug("Tables verified: {:}".format(tables))
+        except Exception as e:
+            LOGGER.debug("Unable to fetch Table names. {:s}".format(str(e)))
+
+    def add_row_to_db(self, row):
+        session = self.Session()
+        try:
+            session.add(row)
+            session.commit()
+            LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.")
+            return True
+        except Exception as e:
+            session.rollback()
+            if "psycopg2.errors.UniqueViolation" in str(e):
+                LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}")
+                raise AlreadyExistsException(row.__class__.__name__, row,
+                                             extra_details=["Unique key voilation: {:}".format(e)] )
+            else:
+                LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
+                raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
+        finally:
+            session.close()
+    
+    def search_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if entity:
+                # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}")
+                return entity
+            else:
+                LOGGER.debug(f"{model.__name__} ID not found: {str(id_to_search)}")
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}")
+            raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)])
+        finally:
+            session.close()
+    
+    def delete_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            record = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if record:
+                session.delete(record)
+                session.commit()
+                LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search)
+            else:
+                LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search)
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e)
+            raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
+        finally:
+            session.close()
+
+    def select_with_filter(self, model, filter_object):
+        session = self.Session()
+        try:
+            query = session.query(model)  # fix: honor the 'model' argument (was hard-coded KpiModel)
+            # Apply filters based on the filter_object
+            if filter_object.kpi_id:
+                query = query.filter(model.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id]))
+
+            if filter_object.kpi_sample_type:
+                query = query.filter(model.kpi_sample_type.in_(filter_object.kpi_sample_type))
+
+            if filter_object.device_id:
+                query = query.filter(model.device_id.in_([d.device_uuid.uuid for d in filter_object.device_id]))
+
+            if filter_object.endpoint_id:
+                query = query.filter(model.endpoint_id.in_([e.endpoint_uuid.uuid for e in filter_object.endpoint_id]))
+
+            if filter_object.service_id:
+                query = query.filter(model.service_id.in_([s.service_uuid.uuid for s in filter_object.service_id]))
+
+            if filter_object.slice_id:
+                query = query.filter(model.slice_id.in_([s.slice_uuid.uuid for s in filter_object.slice_id]))
+
+            if filter_object.connection_id:
+                query = query.filter(model.connection_id.in_([c.connection_uuid.uuid for c in filter_object.connection_id]))
+
+            if filter_object.link_id:
+                query = query.filter(model.link_id.in_([l.link_uuid.uuid for l in filter_object.link_id]))
+            result = query.all()
+
+            if result:
+                LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") #  - Results: {result}
+            else:
+                LOGGER.debug(f"No matching row found in {model.__name__} table with filters: {filter_object}")
+            return result
+        except Exception as e:
+            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}")
+            raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)])
+        finally:
+            session.close()
diff --git a/src/kpi_manager/database/__init__.py b/src/kpi_manager/database/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_manager/database/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_manager/requirements.in b/src/kpi_manager/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..3e98fef362277dbf60019902e115d1c733bea9e7
--- /dev/null
+++ b/src/kpi_manager/requirements.in
@@ -0,0 +1,18 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+psycopg2-binary==2.9.*
+SQLAlchemy==1.4.*
+sqlalchemy-cockroachdb==1.4.*
+SQLAlchemy-Utils==0.38.*
diff --git a/src/kpi_manager/service/KpiManagerService.py b/src/kpi_manager/service/KpiManagerService.py
new file mode 100755
index 0000000000000000000000000000000000000000..b69a926a94c6cf10a680fe1b15d065f6bc073c97
--- /dev/null
+++ b/src/kpi_manager/service/KpiManagerService.py
@@ -0,0 +1,29 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.proto.kpi_manager_pb2_grpc import add_KpiManagerServiceServicer_to_server
+from kpi_manager.service.KpiManagerServiceServicerImpl import KpiManagerServiceServicerImpl
+
+
+class KpiManagerService(GenericGrpcService):
+    def __init__(self, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.KPIMANAGER)
+        super().__init__(port, cls_name=cls_name)
+        self.kpiManagerService_servicer = KpiManagerServiceServicerImpl()
+
+    def install_servicers(self):
+        add_KpiManagerServiceServicer_to_server(self.kpiManagerService_servicer, self.server)
diff --git a/src/kpi_manager/service/KpiManagerServiceServicerImpl.py b/src/kpi_manager/service/KpiManagerServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..05292fc5b14feaf079cc7691c650775965cc9148
--- /dev/null
+++ b/src/kpi_manager/service/KpiManagerServiceServicerImpl.py
@@ -0,0 +1,94 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging, grpc
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.proto.context_pb2 import Empty
+from common.proto.kpi_manager_pb2_grpc import KpiManagerServiceServicer
+from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList
+from kpi_manager.database.Kpi_DB import KpiDB
+from kpi_manager.database.KpiModel import Kpi as KpiModel
+
+LOGGER = logging.getLogger(__name__)
+METRICS_POOL = MetricsPool('KpiManager', 'NBIgRPC')
+
class KpiManagerServiceServicerImpl(KpiManagerServiceServicer):
    """Implements the KpiManager RPCs on top of the KPI database (KpiDB)."""

    def __init__(self):
        LOGGER.info('Init KpiManagerService')
        self.kpi_db_obj = KpiDB()

    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
    def SetKpiDescriptor(self, request: KpiDescriptor, grpc_context: grpc.ServicerContext # type: ignore
                        ) -> KpiId: # type: ignore
        """Insert a KPI descriptor row; returns its KpiId (uuid left empty on failure)."""
        response = KpiId()
        LOGGER.info("Received gRPC message object: {:}".format(request))
        try:
            kpi_to_insert = KpiModel.convert_KpiDescriptor_to_row(request)
            if self.kpi_db_obj.add_row_to_db(kpi_to_insert):
                response.kpi_id.uuid = request.kpi_id.kpi_id.uuid
        except Exception as e: # pylint: disable=broad-except
            LOGGER.error("Unable to create KpiModel class object. {:}".format(e))
        # BUGFIX: always return a KpiId message; the original returned None on
        # exception, which gRPC cannot serialize as a response.
        return response

    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
    def GetKpiDescriptor(self, request: KpiId, grpc_context: grpc.ServicerContext # type: ignore
                         ) -> KpiDescriptor: # type: ignore
        """Look up a KPI descriptor by id; returns an empty KpiDescriptor when not found."""
        LOGGER.info("Received gRPC message object: {:}".format(request))
        try:
            kpi_id_to_search = request.kpi_id.uuid
            row = self.kpi_db_obj.search_db_row_by_id(KpiModel, 'kpi_id', kpi_id_to_search)
            if row is None:
                LOGGER.info('No matching row found kpi id: {:}'.format(kpi_id_to_search))
                # BUGFIX: this RPC is declared to return KpiDescriptor; the
                # original returned Empty(), a different message type.
                return KpiDescriptor()
            return KpiModel.convert_row_to_KpiDescriptor(row)
        except Exception as e:
            LOGGER.error('Unable to search kpi id. {:}'.format(e))
            raise

    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
    def DeleteKpiDescriptor(self, request: KpiId, grpc_context: grpc.ServicerContext # type: ignore
                            ) -> Empty: # type: ignore
        """Delete the descriptor row for the given KpiId; best-effort, always returns Empty."""
        LOGGER.info("Received gRPC message object: {:}".format(request))
        try:
            kpi_id_to_search = request.kpi_id.uuid
            self.kpi_db_obj.delete_db_row_by_id(KpiModel, 'kpi_id', kpi_id_to_search)
        except Exception as e: # pylint: disable=broad-except
            # Deliberately best-effort: deletion failures are logged, not raised.
            LOGGER.error('Unable to delete kpi id. {:}'.format(e))
        # BUGFIX: 'return' moved out of the 'finally' block, where it silently
        # swallowed any in-flight exception; log message also corrected
        # (it said 'Unable to search', copied from GetKpiDescriptor).
        return Empty()

    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
    def SelectKpiDescriptor(self, filter: KpiDescriptorFilter, grpc_context: grpc.ServicerContext # type: ignore
                            ) -> KpiDescriptorList: # type: ignore
        """Return all KPI descriptors matching the given filter."""
        LOGGER.info("Received gRPC message object: {:}".format(filter))
        response = KpiDescriptorList()
        try:
            rows = self.kpi_db_obj.select_with_filter(KpiModel, filter)
        except Exception as e: # pylint: disable=broad-except
            LOGGER.error('Unable to apply filter on kpi descriptor. {:}'.format(e))
            # BUGFIX: the original fell through and iterated an undefined
            # 'rows' variable (NameError) when the query failed.
            return response
        try:
            for row in rows:
                response.kpi_descriptor_list.append(KpiModel.convert_row_to_KpiDescriptor(row))
        except Exception as e: # pylint: disable=broad-except
            LOGGER.error('Unable to process filter response {:}'.format(e))
        # BUGFIX: always return the (possibly partial) list; the original
        # returned None when conversion failed.
        return response
diff --git a/src/kpi_manager/service/__init__.py b/src/kpi_manager/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_manager/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_manager/service/__main__.py b/src/kpi_manager/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..244d5afa373a6462a0382a0ed26a588088a689a1
--- /dev/null
+++ b/src/kpi_manager/service/__main__.py
@@ -0,0 +1,51 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from common.Settings import get_log_level
+from .KpiManagerService import KpiManagerService
+
+terminate = threading.Event()
+LOGGER = None
+
def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
    # Flag the main loop to shut down; actual teardown happens in main().
    LOGGER.warning('Terminate signal received')
    terminate.set()

def main():
    global LOGGER # pylint: disable=global-statement

    logging.basicConfig(level=get_log_level())
    LOGGER = logging.getLogger(__name__)

    # Trigger a clean shutdown on Ctrl+C or `kill`.
    signal.signal(signal.SIGINT,  signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    LOGGER.debug('Starting...')

    grpc_service = KpiManagerService()
    grpc_service.start()

    # Block here until a termination signal flips the event.
    while not terminate.is_set():
        terminate.wait(timeout=1.0)

    LOGGER.debug('Terminating...')
    grpc_service.stop()

    LOGGER.debug('Bye')
    return 0

if __name__ == '__main__':
    sys.exit(main())
diff --git a/src/kpi_manager/tests/test_kpi_db.py b/src/kpi_manager/tests/test_kpi_db.py
new file mode 100644
index 0000000000000000000000000000000000000000..e961c12bacdbac07f111b229435ed3d89d62581f
--- /dev/null
+++ b/src/kpi_manager/tests/test_kpi_db.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging
+from kpi_manager.database.Kpi_DB import KpiDB
+
+LOGGER = logging.getLogger(__name__)
+
def test_verify_databases_and_Tables():
    """Exercise the full drop/verify/create/verify cycle of the KPI database."""
    LOGGER.info('>>> test_verify_Tables : START <<< ')
    kpi_db = KpiDB()
    kpi_db.drop_database()
    kpi_db.verify_tables()
    kpi_db.create_database()
    kpi_db.create_tables()
    kpi_db.verify_tables()
diff --git a/src/kpi_manager/tests/test_kpi_manager.py b/src/kpi_manager/tests/test_kpi_manager.py
new file mode 100755
index 0000000000000000000000000000000000000000..b41e5139db85cfc462cff1e9545fbc8476c67939
--- /dev/null
+++ b/src/kpi_manager/tests/test_kpi_manager.py
@@ -0,0 +1,301 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os, pytest
+import logging
+from typing import Union
+
+#from common.proto.context_pb2 import  Empty
+from common.Constants import ServiceNameEnum
+from common.Settings import ( 
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
+from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+
+from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList
+from common.tools.service.GenericGrpcService import GenericGrpcService
+#from context.client.ContextClient import ContextClient
+
+# from device.service.driver_api.DriverFactory import DriverFactory
+# from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
+# from device.service.DeviceService import DeviceService
+# from device.client.DeviceClient import DeviceClient
+
+from kpi_manager.tests.test_messages import create_kpi_descriptor_request, create_kpi_filter_request, create_kpi_descriptor_request_a
+from kpi_manager.service.KpiManagerService import KpiManagerService
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+from kpi_manager.tests.test_messages import create_kpi_descriptor_request
+from kpi_value_writer.tests.test_messages import create_kpi_id_request
+
+
+#from monitoring.service.NameMapping import NameMapping
+
+#os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
+#from device.service.drivers import DRIVERS
+
+###########################
+# Tests Setup
+###########################
+
+LOCAL_HOST = '127.0.0.1'
+
+KPIMANAGER_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIMANAGER)  # type: ignore
+os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIMANAGER_SERVICE_PORT)
+
+# METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME'){}
+
+LOGGER = logging.getLogger(__name__)
+
class MockContextService(GenericGrpcService):
    """Mock Context service keeping KPI Manager unit tests self-contained."""

    def __init__(self, bind_port: Union[str, int]) -> None:
        super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')

    # pylint: disable=attribute-defined-outside-init
    def install_servicers(self):
        # Keep a handle on the servicer so tests can reach its message broker.
        servicer = MockServicerImpl_Context()
        self.context_servicer = servicer
        add_ContextServiceServicer_to_server(servicer, self.server)
+
+# @pytest.fixture(scope='session')
+# def context_service():
+#     LOGGER.info('Initializing MockContextService...')
+#     _service = MockContextService(MOCKSERVICE_PORT)
+#     _service.start()
+    
+#     LOGGER.info('Yielding MockContextService...')
+#     yield _service
+
+#     LOGGER.info('Terminating MockContextService...')
+#     _service.context_servicer.msg_broker.terminate()
+#     _service.stop()
+
+#     LOGGER.info('Terminated MockContextService...')
+
+# @pytest.fixture(scope='session')
+# def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
+#     LOGGER.info('Initializing ContextClient...')
+#     _client = ContextClient()
+    
+#     LOGGER.info('Yielding ContextClient...')
+#     yield _client
+
+#     LOGGER.info('Closing ContextClient...')
+#     _client.close()
+
+#     LOGGER.info('Closed ContextClient...')
+
+# @pytest.fixture(scope='session')
+# def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
+#     LOGGER.info('Initializing DeviceService...')
+#     driver_factory = DriverFactory(DRIVERS)
+#     driver_instance_cache = DriverInstanceCache(driver_factory)
+#     _service = DeviceService(driver_instance_cache)
+#     _service.start()
+
+#     # yield the server, when test finishes, execution will resume to stop it
+#     LOGGER.info('Yielding DeviceService...')
+#     yield _service
+
+#     LOGGER.info('Terminating DeviceService...')
+#     _service.stop()
+
+#     LOGGER.info('Terminated DeviceService...')
+
+# @pytest.fixture(scope='session')
+# def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument
+#     LOGGER.info('Initializing DeviceClient...')
+#     _client = DeviceClient()
+
+#     LOGGER.info('Yielding DeviceClient...')
+#     yield _client
+
+#     LOGGER.info('Closing DeviceClient...')
+#     _client.close()
+
+#     LOGGER.info('Closed DeviceClient...')
+
+# @pytest.fixture(scope='session')
+# def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument
+#     LOGGER.info('Initializing DeviceClient...')
+#     _client = DeviceClient()
+
+#     LOGGER.info('Yielding DeviceClient...')
+#     yield _client
+
+#     LOGGER.info('Closing DeviceClient...')
+#     _client.close()
+
+#     LOGGER.info('Closed DeviceClient...')
+
+# This fixture will be requested by test cases and last during testing session
@pytest.fixture(scope='session')
def kpi_manager_service():
    """Session-scoped KpiManagerService instance shared by all tests."""
    LOGGER.info('Initializing KpiManagerService...')
    service = KpiManagerService()
    service.start()

    # Yield the running server; execution resumes here after the test session.
    LOGGER.info('Yielding KpiManagerService...')
    yield service

    LOGGER.info('Terminating KpiManagerService...')
    service.stop()

    LOGGER.info('Terminated KpiManagerService...')
+
+# This fixture will be requested by test cases and last during testing session.
+# The client requires the server, so client fixture has the server as dependency.
+# def monitoring_client(monitoring_service : MonitoringService): (Add for better understanding)
@pytest.fixture(scope='session')
def kpi_manager_client(kpi_manager_service : KpiManagerService): # pylint: disable=redefined-outer-name,unused-argument
    """Session-scoped client; depends on kpi_manager_service so the server is up first."""
    LOGGER.info('Initializing KpiManagerClient...')
    client = KpiManagerClient()

    LOGGER.info('Yielding KpiManagerClient...')
    yield client

    LOGGER.info('Closing KpiManagerClient...')
    client.close()

    LOGGER.info('Closed KpiManagerClient...')
+
+##################################################
+# Prepare Environment, should be the first test
+##################################################
+
+# # ERROR on this test --- 
+# def test_prepare_environment(
+#     context_client : ContextClient,                 # pylint: disable=redefined-outer-name,unused-argument
+# ):
+#     context_id = json_context_id(DEFAULT_CONTEXT_NAME)
+#     context_client.SetContext(Context(**json_context(DEFAULT_CONTEXT_NAME)))
+#     context_client.SetTopology(Topology(**json_topology(DEFAULT_TOPOLOGY_NAME, context_id=context_id)))
+
+###########################
+# Tests Implementation of Kpi Manager
+###########################
+
+# ---------- 3rd Iteration Tests ----------------
+# def test_SetKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ")
+#     response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     LOGGER.info("Response gRPC message object: {:}".format(response))
+#     assert isinstance(response, KpiId)
+
+# def test_DeleteKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ")
+#     # adding KPI
+#     response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     # deleting KPI
+#     del_response = kpi_manager_client.DeleteKpiDescriptor(response_id)
+#     # select KPI
+#     kpi_manager_client.GetKpiDescriptor(response_id)
+#     LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response))
+#     assert isinstance(del_response, Empty)
+
def test_GetKpiDescriptor(kpi_manager_client):
    """Store a descriptor, fetch it back, and probe lookup of an unknown id."""
    LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ")
    # Store a descriptor first so there is something to look up.
    response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
    response = kpi_manager_client.GetKpiDescriptor(response_id)
    LOGGER.info("Response gRPC message object: {:}".format(response))

    LOGGER.info(" >>> calling GetKpiDescriptor with random ID")
    rand_response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request())
    LOGGER.info("Response gRPC message object: {:}".format(rand_response))

    assert isinstance(response, KpiDescriptor)
+
+# def test_SelectKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ")
+#     # adding KPI
+#     kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     # select KPI(s)    
+#     response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request())
+#     LOGGER.info("Response gRPC message object: {:}".format(response))
+#     assert isinstance(response, KpiDescriptorList)
+
+# def test_set_list_of_KPIs(kpi_manager_client):
+#     LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ")
+#     KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"]
+#     # adding KPI
+#     for kpi in KPIs_TO_SEARCH:
+#        kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi))
+    
+
+# ---------- 2nd Iteration Tests -----------------
+# def test_SetKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ")
+#     with open("kpi_manager/tests/KPI_configs.json", 'r') as file:
+#         data = json.load(file)
+#         _descriptors = data.get('KPIs', [])
+#     for _descritor_name in _descriptors:
+#         response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(_descritor_name))
+#         LOGGER.info("Response gRPC message object: {:}".format(response))
+#     assert isinstance(response, KpiId)
+
+# def test_GetKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ")
+#     response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request())
+#     LOGGER.info("Response gRPC message object: {:}".format(response))
+#     assert isinstance(response, KpiDescriptor)
+
+# def test_DeleteKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ")
+#     response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     del_response = kpi_manager_client.DeleteKpiDescriptor(response)
+#     kpi_manager_client.GetKpiDescriptor(response)
+#     LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response))
+#     assert isinstance(del_response, Empty)
+
+# def test_SelectKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ")
+#     kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a())
+#     response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request_a())
+#     LOGGER.info("Response gRPC message object: {:}".format(response))
+#     assert isinstance(response, KpiDescriptorList)
+
+# ------------- INITIAL TESTs ----------------
+# Test case that makes use of client fixture to test server's CreateKpi method
+# def test_set_kpi(kpi_manager_client): # pylint: disable=redefined-outer-name
+#     # make call to server
+#     LOGGER.warning('test_create_kpi requesting')
+#     for i in range(3):
+#         response = kpi_manager_client.SetKpiDescriptor(create_kpi_request(str(i+1)))
+#         LOGGER.debug(str(response))
+#         assert isinstance(response, KpiId)
+
+# # Test case that makes use of client fixture to test server's DeleteKpi method
+# def test_delete_kpi(kpi_manager_client): # pylint: disable=redefined-outer-name
+#     # make call to server
+#     LOGGER.warning('delete_kpi requesting')
+#     response = kpi_manager_client.SetKpiDescriptor(create_kpi_request('4'))
+#     response = kpi_manager_client.DeleteKpiDescriptor(response)
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, Empty)
+
+# # Test case that makes use of client fixture to test server's GetKpiDescriptor method
+# def test_select_kpi_descriptor(kpi_manager_client): # pylint: disable=redefined-outer-name
+#     LOGGER.warning('test_selectkpidescritor begin')
+#     response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, KpiDescriptorList)
diff --git a/src/kpi_manager/tests/test_messages.py b/src/kpi_manager/tests/test_messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..870660658a2808fc6db2e98a140497980022e5a7
--- /dev/null
+++ b/src/kpi_manager/tests/test_messages.py
@@ -0,0 +1,72 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+from common.proto import kpi_manager_pb2
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.proto.context_pb2 import DeviceId, LinkId, ServiceId, SliceId,\
+                             ConnectionId, EndPointId
+
def create_kpi_descriptor_request(descriptor_name: str = "Test_name"):
    """Build a sample KpiDescriptor with a fresh random KpiId and fixed test ids."""
    descriptor = kpi_manager_pb2.KpiDescriptor()
    descriptor.kpi_id.kpi_id.uuid                 = str(uuid.uuid4())
    descriptor.kpi_description                    = descriptor_name
    descriptor.kpi_sample_type                    = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
    descriptor.device_id.device_uuid.uuid         = 'DEV2'
    descriptor.service_id.service_uuid.uuid       = 'SERV2'
    descriptor.slice_id.slice_uuid.uuid           = 'SLC1'
    descriptor.endpoint_id.endpoint_uuid.uuid     = 'END1'
    descriptor.connection_id.connection_uuid.uuid = 'CON1'
    descriptor.link_id.link_uuid.uuid             = 'LNK1'
    return descriptor
+
def create_kpi_descriptor_request_a(description: str = "Test Description"):
    """Build an alternative sample KpiDescriptor (different entity ids than the base one)."""
    descriptor = kpi_manager_pb2.KpiDescriptor()
    descriptor.kpi_id.kpi_id.uuid                 = str(uuid.uuid4())
    descriptor.kpi_description                    = description
    descriptor.kpi_sample_type                    = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
    descriptor.device_id.device_uuid.uuid         = 'DEV4'
    descriptor.service_id.service_uuid.uuid       = 'SERV3'
    descriptor.slice_id.slice_uuid.uuid           = 'SLC3'
    descriptor.endpoint_id.endpoint_uuid.uuid     = 'END2'
    descriptor.connection_id.connection_uuid.uuid = 'CON2'
    descriptor.link_id.link_uuid.uuid             = 'LNK2'
    return descriptor
+
def create_kpi_filter_request():
    """Build a KpiDescriptorFilter matching the ids used by create_kpi_descriptor_request()."""
    kpi_filter = kpi_manager_pb2.KpiDescriptorFilter()
    kpi_filter.kpi_sample_type.append(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED)

    device_id = DeviceId()
    device_id.device_uuid.uuid = "DEV2"
    kpi_filter.device_id.append(device_id)

    service_id = ServiceId()
    service_id.service_uuid.uuid = "SERV2"
    kpi_filter.service_id.append(service_id)

    slice_id = SliceId()
    slice_id.slice_uuid.uuid = "SLC1"
    kpi_filter.slice_id.append(slice_id)

    endpoint_id = EndPointId()
    endpoint_id.endpoint_uuid.uuid = "END1"
    kpi_filter.endpoint_id.append(endpoint_id)

    connection_id = ConnectionId()
    connection_id.connection_uuid.uuid = "CON1"
    kpi_filter.connection_id.append(connection_id)

    link_id = LinkId()
    link_id.link_uuid.uuid = "LNK1"
    kpi_filter.link_id.append(link_id)

    return kpi_filter
\ No newline at end of file
diff --git a/src/kpi_value_api/.gitlab-ci.yml b/src/kpi_value_api/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c9107abaac83199cc18ea720e4e3e5e14e353189
--- /dev/null
+++ b/src/kpi_value_api/.gitlab-ci.yml
@@ -0,0 +1,109 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Build, tag, and push the Docker image to the GitLab Docker registry
build kpi-value-api:
  variables:
    IMAGE_NAME: 'kpi-value-api' # name of the microservice
    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
  stage: build
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
  after_script:
    # Remove dangling images left behind by the build to free runner disk space
    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
  rules:
    # Run on MRs targeting develop/default branch, pushes to develop, or relevant file changes
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
    - changes:
      # NOTE(review): $IMAGE_NAME may not be expanded inside 'changes:' patterns
      # on all GitLab versions — confirm against the GitLab CI docs
      - src/common/**/*.py
      - proto/*.proto
      - src/$IMAGE_NAME/**/*.{py,in,yml}
      - src/$IMAGE_NAME/Dockerfile
      - src/$IMAGE_NAME/tests/*.py
      - manifests/${IMAGE_NAME}service.yaml
      - .gitlab-ci.yml
+
# Apply unit test to the component
unit_test kpi-value-api:
  variables:
    IMAGE_NAME: 'kpi-value-api' # name of the microservice
    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
  stage: unit_test
  needs:
    - build kpi-value-api
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
    # Reuse the bridge network if a previous run already created it
    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
    # Remove any stale container from a previous run
    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
    - docker container prune -f
  script:
    - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
    # Mount tests dir as /opt/results so the JUnit report survives the container
    - docker run --name $IMAGE_NAME -d -p 30020:30020 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
    - sleep 5
    - docker ps -a
    - docker logs $IMAGE_NAME
    - >
      docker exec -i $IMAGE_NAME bash -c
      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py"
    - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
  after_script:
    - docker rm -f $IMAGE_NAME
    - docker network rm teraflowbridge
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
    - changes:
      - src/common/**/*.py
      - proto/*.proto
      - src/$IMAGE_NAME/**/*.{py,in,yml}
      - src/$IMAGE_NAME/Dockerfile
      - src/$IMAGE_NAME/tests/*.py
      - src/$IMAGE_NAME/tests/Dockerfile
      - manifests/${IMAGE_NAME}service.yaml
      - .gitlab-ci.yml
  artifacts:
      when: always
      reports:
        junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
+
+## Deployment of the service in Kubernetes Cluster
+#deploy context:
+#  variables:
+#    IMAGE_NAME: 'context' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit test context
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual    
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/kpi_value_api/Dockerfile b/src/kpi_value_api/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..7dd8d307b8338c4a29e97c742ca12a49c4611e0a
--- /dev/null
+++ b/src/kpi_value_api/Dockerfile
@@ -0,0 +1,68 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur (any non-empty value enables unbuffered
+# output; use the canonical '1' instead of the misleading '0')
+ENV PYTHONUNBUFFERED=1
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/kpi_value_api
+WORKDIR /var/teraflow/kpi_value_api
+COPY src/kpi_value_api/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/kpi_value_api/. kpi_value_api/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "kpi_value_api.service"]
diff --git a/src/kpi_value_api/__init__.py b/src/kpi_value_api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_api/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_api/client/KpiValueApiClient.py b/src/kpi_value_api/client/KpiValueApiClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..f432271cfb7c8136f72156330b25d0b82b934d99
--- /dev/null
+++ b/src/kpi_value_api/client/KpiValueApiClient.py
@@ -0,0 +1,75 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+from common.proto.context_pb2 import Empty
+from common.proto.kpi_value_api_pb2 import KpiValueList, KpiValueFilter
+from common.proto.kpi_value_api_pb2_grpc import KpiValueAPIServiceStub
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 10
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class KpiValueApiClient:
+    """gRPC client wrapper for the KPI Value API service (with automatic retry)."""
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.KPIVALUEAPI)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.KPIVALUEAPI)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        """(Re)create the insecure channel and stub; also used by the retry decorator."""
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub    = KpiValueAPIServiceStub(self.channel)
+
+    def close(self):
+        """Close the channel; safe to call multiple times."""
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    def __enter__(self):
+        # Allow usage as a context manager: 'with KpiValueApiClient() as client:'
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.close()
+
+    @RETRY_DECORATOR
+    def StoreKpiValues(self, request: KpiValueList) -> Empty:
+        """Publish a list of KPI values; returns Empty on success."""
+        LOGGER.debug('StoreKpiValues: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.StoreKpiValues(request)
+        LOGGER.debug('StoreKpiValues result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def SelectKpiValues(self, request: KpiValueFilter) -> KpiValueList:
+        """Retrieve the KPI values matching the given filter."""
+        LOGGER.debug('SelectKpiValues: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectKpiValues(request)
+        LOGGER.debug('SelectKpiValues result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
diff --git a/src/kpi_value_api/client/__init__.py b/src/kpi_value_api/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_api/client/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_api/requirements.in b/src/kpi_value_api/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..7e4694109dc4e1d31b86abfc03162494faafcdaf
--- /dev/null
+++ b/src/kpi_value_api/requirements.in
@@ -0,0 +1,16 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+confluent-kafka==2.3.*
+requests==2.27.*
diff --git a/src/kpi_value_api/service/KpiValueApiService.py b/src/kpi_value_api/service/KpiValueApiService.py
new file mode 100644
index 0000000000000000000000000000000000000000..68b6fbdc278a00aa7cf98385bcf8afa573f91445
--- /dev/null
+++ b/src/kpi_value_api/service/KpiValueApiService.py
@@ -0,0 +1,32 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from .KpiValueApiServiceServicerImpl import KpiValueApiServiceServicerImpl
+from common.proto.kpi_value_api_pb2_grpc import add_KpiValueAPIServiceServicer_to_server
+
+
+class KpiValueApiService(GenericGrpcService):
+    """Bootstrap the gRPC server for the KPI Value API on its configured port."""
+    def __init__(self, cls_name : str = __name__ ) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.KPIVALUEAPI)
+        super().__init__(port, cls_name=cls_name)
+        self.kpiValueApiService_servicer = KpiValueApiServiceServicerImpl()
+
+    def install_servicers(self):
+        # Invoked by GenericGrpcService when the server is created.
+        add_KpiValueAPIServiceServicer_to_server(self.kpiValueApiService_servicer, self.server)
diff --git a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..d27de54f3cddfd0d70d656a89c45adc50e518289
--- /dev/null
+++ b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py
@@ -0,0 +1,123 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, grpc, requests
+from typing import Tuple, Any
+from datetime import datetime
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+
+from common.proto.context_pb2 import Empty
+from common.proto.kpi_value_api_pb2_grpc import KpiValueAPIServiceServicer
+from common.proto.kpi_value_api_pb2 import KpiValueList, KpiValueFilter, KpiValue, KpiValueType
+
+from confluent_kafka import Producer as KafkaProducer
+
+
+LOGGER       = logging.getLogger(__name__)
+METRICS_POOL = MetricsPool('KpiValueAPI', 'NBIgRPC')
+PROM_URL     = "http://localhost:9090"
+
+class KpiValueApiServiceServicerImpl(KpiValueAPIServiceServicer):
+    def __init__(self):
+        LOGGER.debug('Init KpiValueApiService')
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StoreKpiValues(self, request: KpiValueList, grpc_context: grpc.ServicerContext
+                       ) -> Empty:
+        """Publish every KpiValue of the request to the Kafka VALUE topic."""
+        LOGGER.debug('StoreKpiValues: Received gRPC message object: {:}'.format(request))
+        producer_obj = KafkaProducer({
+            'bootstrap.servers' : KafkaConfig.SERVER_IP.value
+        })
+        for kpi_value in request.kpi_value_list:
+            kpi_value_to_produce : Tuple [str, Any, Any] = (
+                kpi_value.kpi_id.kpi_id,
+                kpi_value.timestamp,
+                kpi_value.kpi_value_type
+            )
+            LOGGER.debug('KPI to produce is {:}'.format(kpi_value_to_produce))
+            msg_key = "gRPC-kpivalueapi"        # str(__class__.__name__) can be used
+
+            producer_obj.produce(
+                KafkaTopic.VALUE.value,
+                key      = msg_key,
+                value    = kpi_value.SerializeToString(),
+                callback = self.delivery_callback
+            )
+        # Flush once, after the loop: per-message flushing defeats Kafka batching.
+        producer_obj.flush()
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectKpiValues(self, request: KpiValueFilter, grpc_context: grpc.ServicerContext
+                        ) -> KpiValueList:
+        """Query Prometheus for each requested KPI over each [start, end] window."""
+        LOGGER.debug('SelectKpiValues: Received gRPC message object: {:}'.format(request))
+        response = KpiValueList()
+        # KpiId wraps a Uuid message; extract the plain uuid string for the PromQL query.
+        metrics          = [kpi.kpi_id.uuid for kpi in request.kpi_id]
+        start_timestamps = [timestamp for timestamp in request.start_timestamp]
+        end_timestamps   = [timestamp for timestamp in request.end_timestamp]
+        results = []
+
+        for start, end in zip(start_timestamps, end_timestamps):
+            start_str = datetime.fromtimestamp(start.seconds).isoformat() + "Z"
+            end_str   = datetime.fromtimestamp(end.seconds).isoformat() + "Z"
+
+            for metric in metrics:
+                url    = f'{PROM_URL}/api/v1/query_range'
+                params = {
+                    'query': metric,
+                    'start': start_str,
+                    'end'  : end_str,
+                    'step' : '30s'           # or any other step you need
+                }
+                # Dedicated variable: must not shadow the gRPC 'response' object.
+                prom_response = requests.get(url, params=params)
+                if prom_response.status_code == 200:
+                    data = prom_response.json()
+                    for result in data['data']['result']:
+                        for value in result['values']:
+                            kpi_value = KpiValue()
+                            kpi_value.kpi_id.kpi_id.uuid  = metric
+                            kpi_value.timestamp.timestamp = float(value[0])
+                            kpi_value.kpi_value_type.CopyFrom(
+                                self._convert_value_to_kpi_value_type(value[1]))
+                            results.append(kpi_value)
+        response.kpi_value_list.extend(results)
+        return response
+
+    def _convert_value_to_kpi_value_type(self, value):
+        """Map a Prometheus sample string to the matching KpiValueType oneof."""
+        # Check if the value is an integer (int64)
+        try:
+            return KpiValueType(int64Val=int(value))
+        except ValueError:
+            pass
+        # Check if the value is a float
+        try:
+            return KpiValueType(floatVal=float(value))
+        except ValueError:
+            pass
+        # Check if the value is a boolean
+        if value.lower() in ['true', 'false']:
+            return KpiValueType(boolVal=(value.lower() == 'true'))
+        # If none of the above, treat it as a string
+        return KpiValueType(stringVal=value)
+
+    def delivery_callback(self, err, msg):
+        """Kafka delivery report: log outcome of an async produce()."""
+        if err: LOGGER.debug('Message delivery failed: {:}'.format(err))
+        else:   LOGGER.debug('Message delivered to topic {:}'.format(msg.topic()))
diff --git a/src/kpi_value_api/service/__init__.py b/src/kpi_value_api/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_api/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_api/service/__main__.py b/src/kpi_value_api/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b4ebe296e2c4f193aa1fc99aede9364556c2094
--- /dev/null
+++ b/src/kpi_value_api/service/__main__.py
@@ -0,0 +1,56 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from prometheus_client import start_http_server
+# NOTE(review): start_http_server is imported but never called; either start the
+# Prometheus metrics endpoint in main() or drop the import — TODO confirm.
+from common.Settings import get_log_level
+from .KpiValueApiService import KpiValueApiService
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    """Request a graceful shutdown on SIGINT/SIGTERM."""
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    """Configure logging, start the gRPC service, and block until terminated."""
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level)
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.debug('Starting...')
+
+    grpc_service = KpiValueApiService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.debug('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.debug('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/kpi_value_api/tests/messages.py b/src/kpi_value_api/tests/messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2a1cbb0b275fb26d6498e4470f3869a105a8d36
--- /dev/null
+++ b/src/kpi_value_api/tests/messages.py
@@ -0,0 +1,35 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid, time
+from common.proto.kpi_value_api_pb2 import KpiValue, KpiValueList
+
+
+def create_kpi_value_list():
+    _create_kpi_value_list = KpiValueList()
+    # To run this experiment successfully, an already existing UUID in the KPI DB is necessary,
+    # because the UUID is used to get the descriptor from the KPI DB.
+    EXISTING_KPI_IDs = ["725ce3ad-ac67-4373-bd35-8cd9d6a86e09",
+                        str(uuid.uuid4()), 
+                        str(uuid.uuid4())]
+
+    for kpi_id_uuid in EXISTING_KPI_IDs:
+        kpi_value_object = KpiValue()
+        kpi_value_object.kpi_id.kpi_id.uuid      = kpi_id_uuid
+        kpi_value_object.timestamp.timestamp     = float(time.time())
+        kpi_value_object.kpi_value_type.floatVal = 100
+
+        _create_kpi_value_list.kpi_value_list.append(kpi_value_object)
+
+    return _create_kpi_value_list
diff --git a/src/kpi_value_api/tests/test_kpi_value_api.py b/src/kpi_value_api/tests/test_kpi_value_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..307b5cdad4e6503a774e308f669fc44762f84bf1
--- /dev/null
+++ b/src/kpi_value_api/tests/test_kpi_value_api.py
@@ -0,0 +1,84 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os, logging, pytest
+from common.proto.context_pb2 import Empty
+from common.Constants import ServiceNameEnum
+from common.tools.kafka.Variables import KafkaTopic
+from common.Settings import ( 
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
+from kpi_value_api.service.KpiValueApiService import KpiValueApiService
+from kpi_value_api.client.KpiValueApiClient import KpiValueApiClient
+from kpi_value_api.tests.messages import create_kpi_value_list
+
+
+LOCAL_HOST = '127.0.0.1'
+KPIVALUEAPI_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIVALUEAPI)  # type: ignore
+os.environ[get_env_var_name(ServiceNameEnum.KPIVALUEAPI, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.KPIVALUEAPI, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIVALUEAPI_SERVICE_PORT)
+LOGGER = logging.getLogger(__name__)
+
+# This fixture will be requested by test cases and last during testing session
+@pytest.fixture(scope='session')
+def kpi_value_api_service():
+    LOGGER.info('Initializing KpiValueApiService...')
+    # Instantiate the service under test (replaces the legacy MonitoringService).
+    _service = KpiValueApiService()
+    _service.start()
+
+    # yield the server; when tests finish, execution resumes here to stop it
+    LOGGER.info('Yielding KpiValueApiService...')
+    yield _service
+
+    LOGGER.info('Terminating KpiValueApiService...')
+    _service.stop()
+
+    LOGGER.info('Terminated KpiValueApiService...')
+
+# This fixture will be requested by test cases and last during testing session.
+# The client requires the server, so client fixture has the server as dependency.
+@pytest.fixture(scope='session')
+def kpi_value_api_client(kpi_value_api_service : KpiValueApiService ):
+    LOGGER.info('Initializing KpiValueApiClient...')
+    _client = KpiValueApiClient()
+
+    # yield the client; when tests finish, execution resumes here to close it
+    LOGGER.info('Yielding KpiValueApiClient...')
+    yield _client
+
+    LOGGER.info('Closing KpiValueApiClient...')
+    _client.close()
+
+    LOGGER.info('Closed KpiValueApiClient...')
+
+##################################################
+# Prepare Environment, should be the first test
+##################################################
+
+# To be added here
+
+###########################
+# Tests Implementation of Kpi Value Api
+###########################
+
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+def test_store_kpi_values(kpi_value_api_client):
+    LOGGER.debug(" >>> test_store_kpi_values: START <<< ")
+    response = kpi_value_api_client.StoreKpiValues(create_kpi_value_list())
+    assert isinstance(response, Empty)
diff --git a/src/kpi_value_writer/.gitlab-ci.yml b/src/kpi_value_writer/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..52b1b8fe6fd41c5fd8b43adf4ca8da464c8c08ba
--- /dev/null
+++ b/src/kpi_value_writer/.gitlab-ci.yml
@@ -0,0 +1,109 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build kpi-value-writer:
+  variables:
+    IMAGE_NAME: 'kpi-value-writer' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
+    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit_test kpi-value-writer:
+  variables:
+    IMAGE_NAME: 'kpi-value-writer' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build kpi-value-writer
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+    - docker container prune -f
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker run --name $IMAGE_NAME -d -p 30030:30030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - sleep 5
+    - docker ps -a
+    - docker logs $IMAGE_NAME
+    - >
+      docker exec -i $IMAGE_NAME bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py"
+    - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker rm -f $IMAGE_NAME
+    - docker network rm teraflowbridge
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - src/$IMAGE_NAME/tests/Dockerfile
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
+
+## Deployment of the service in Kubernetes Cluster
+#deploy kpi-value-writer:
+#  variables:
+#    IMAGE_NAME: 'kpi-value-writer' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit_test kpi-value-writer
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/kpi_value_writer/Dockerfile b/src/kpi_value_writer/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..70f41128bd8c982f604a3424d2096c918ead080e
--- /dev/null
+++ b/src/kpi_value_writer/Dockerfile
@@ -0,0 +1,70 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur (any non-empty value enables unbuffered
+# output; use the canonical '1' instead of the misleading '0')
+ENV PYTHONUNBUFFERED=1
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/kpi_value_writer
+WORKDIR /var/teraflow/kpi_value_writer
+COPY src/kpi_value_writer/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/kpi_value_writer/. kpi_value_writer/
+COPY src/kpi_manager/__init__.py kpi_manager/__init__.py
+COPY src/kpi_manager/client/. kpi_manager/client/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "kpi_value_writer.service"]
diff --git a/src/kpi_value_writer/README.md b/src/kpi_value_writer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..72ba6e5594adeef4a29d650615716c26273ed115
--- /dev/null
+++ b/src/kpi_value_writer/README.md
@@ -0,0 +1,29 @@
+# How to locally run and test KPI manager micro-service
+
+## --- File links need to be updated. ---
+### Pre-requisites
+The following requirements should be fulfilled before the execution of the KPI management service.
+
+1. Verify that the [kpi_management.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/kpi_management.proto) file exists and the gRPC stub files are generated successfully. 
+2. A virtual environment exists with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/requirements.in) installed successfully.
+3. Verify the creation of required database and table.
+[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/database/tests/KpiDBtests.py) python file enlist the functions to create tables and database and
+[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/service/database/KpiEngine.py) contains the DB string, update the string as per your deployment.
+
+### Messages format templates
+["Messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_messages.py) python file enlist the basic gRPC messages format used during the testing.
+
+### Test file
+["KPI management test"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_kpi_manager.py) python file enlist different tests conducted during the experiment.
+
+### Flow of execution (KPI Manager Service functions)
+1. Call the `create_database()` and `create_tables()` functions from `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables` to verify the existence of KPI table.
+
+2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor in `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
+
+3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from DB.
+
+4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that matches the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types.
+
+## For KPI composer and KPI writer
+The functionalities of the KPI composer and writer are heavily dependent upon the Telemetry service. Therefore, these services have other pre-requisites that are mentioned [here](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/requirements.in).
\ No newline at end of file
diff --git a/src/kpi_value_writer/__init__.py b/src/kpi_value_writer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_writer/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_writer/requirements.in b/src/kpi_value_writer/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..7e4694109dc4e1d31b86abfc03162494faafcdaf
--- /dev/null
+++ b/src/kpi_value_writer/requirements.in
@@ -0,0 +1,16 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+confluent-kafka==2.3.*
+requests==2.27.*
diff --git a/src/kpi_value_writer/service/KpiValueWriter.py b/src/kpi_value_writer/service/KpiValueWriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4b10ed6391c4ff0b0ee45a287ce9f12d77e2dea
--- /dev/null
+++ b/src/kpi_value_writer/service/KpiValueWriter.py
@@ -0,0 +1,98 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import threading
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+from common.proto.kpi_value_api_pb2 import KpiValue
+from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId
+
+from confluent_kafka import KafkaError
+from confluent_kafka import Consumer as KafkaConsumer
+
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+# -- test import --
+from kpi_value_writer.tests.test_messages import create_kpi_descriptor_request
+from .MetricWriterToPrometheus import MetricWriterToPrometheus
+
+
+LOGGER           = logging.getLogger(__name__)
+ACTIVE_CONSUMERS = []
+
+class KpiValueWriter:
+    @staticmethod
+    def RunKafkaConsumer():
+        thread = threading.Thread(target=KpiValueWriter.KafkaConsumer, args=())
+        ACTIVE_CONSUMERS.append(thread)
+        thread.start()
+
+    @staticmethod
+    def KafkaConsumer():
+        kafka_consumer  = KafkaConsumer(
+            { 'bootstrap.servers' : KafkaConfig.SERVER_IP.value,
+              'group.id'          : __class__,
+              'auto.offset.reset' : 'latest'}
+        )
+        
+        metric_writer_to_prom = MetricWriterToPrometheus()
+        kpi_manager_client = KpiManagerClient()
+        print("Kpi manger client created: {:}".format(kpi_manager_client))
+
+        kafka_consumer.subscribe([KafkaTopic.VALUE.value])
+        LOGGER.debug("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value))
+        print("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value))
+        while True:
+            raw_kpi = kafka_consumer.poll(1.0)
+            if raw_kpi is None:
+                continue
+            elif raw_kpi.error():
+                if raw_kpi.error().code() == KafkaError._PARTITION_EOF:
+                    continue
+                else:
+                    print("Consumer error: {}".format(raw_kpi.error()))
+                    continue
+            try:
+                kpi_value = KpiValue()
+                kpi_value.ParseFromString(raw_kpi.value())
+                LOGGER.info("Received KPI : {:}".format(kpi_value))
+                print("Received KPI : {:}".format(kpi_value))
+                KpiValueWriter.get_kpi_descriptor(kpi_value, kpi_manager_client)
+            except Exception as e:
+                print("Error detail: {:}".format(e))
+                continue
+
+    @staticmethod
+    def get_kpi_descriptor(kpi_value: str, kpi_manager_client ):
+        print("--- START -----")
+
+        kpi_id = KpiId()
+        kpi_id.kpi_id.uuid = kpi_value.kpi_id.kpi_id.uuid
+        print("KpiId generated: {:}".format(kpi_id))
+        # print("Kpi manger client created: {:}".format(kpi_manager_client))
+
+        try:
+            kpi_descriptor_object = KpiDescriptor()
+            kpi_descriptor_object = kpi_manager_client.GetKpiDescriptor(kpi_id)
+
+            print("kpi descriptor received: {:}".format(kpi_descriptor_object))
+            if isinstance (kpi_descriptor_object, KpiDescriptor):
+                LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
+                print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
+                MetricWriterToPrometheus.create_and_expose_cooked_kpi(kpi_descriptor_object, kpi_value)
+            else:
+                LOGGER.info("Error in extracting KpiDescriptor {:}".format(kpi_descriptor_object))
+                print("Error in extracting KpiDescriptor {:}".format(kpi_descriptor_object))
+        except Exception as e:
+            LOGGER.info("Unable to get KpiDescriptor. Error: {:}".format(e))
+            print ("Unable to get KpiDescriptor. Error: {:}".format(e))
diff --git a/src/kpi_value_writer/service/KpiWriterOld.py b/src/kpi_value_writer/service/KpiWriterOld.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9a4316b0f5decb2364127052ebf0b44edc05fbd
--- /dev/null
+++ b/src/kpi_value_writer/service/KpiWriterOld.py
@@ -0,0 +1,108 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# read Kafka stream from Kafka topic
+
+import ast
+import time
+import threading
+from confluent_kafka import KafkaError
+from prometheus_client import start_http_server, Gauge, CollectorRegistry
+from confluent_kafka import Consumer as KafkaConsumer
+
+KAFKA_SERVER_IP = '127.0.0.1:9092'
+KAFKA_TOPICS    = {'request' : 'topic_request', 'response': 'topic_response',
+                   'raw'     : 'topic_raw'    , 'labeled' : 'topic_labeled'}
+CONSUMER_CONFIG = {'bootstrap.servers' : KAFKA_SERVER_IP,
+                   'group.id'          : 'kpi_writer',
+                   'auto.offset.reset' : 'latest'}
+KPIs_TO_SEARCH  = ["node_network_receive_packets_total",
+                   "node_network_receive_bytes_total",
+                   "node_network_transmit_bytes_total",
+                   "process_open_fds"]
+PROM_METRICS    = {}
+KAFKA_REGISTERY   = CollectorRegistry()
+
+class KpiWriter:
+    """
+    Legacy KPI writer: listens on the 'labeled' Kafka topic for KPI events and
+    republishes them as Prometheus gauges on an HTTP endpoint (port 8101).
+    """
+    def __init__(self) -> None:
+        pass
+
+    @staticmethod
+    def kpi_writer():
+        # Entry point: pre-register the gauges first, then consume Kafka in a
+        # background thread so the caller is not blocked.
+        KpiWriter.create_prom_metrics_name()
+        threading.Thread(target=KpiWriter.kafka_listener, args=()).start() 
+
+    @staticmethod
+    def kafka_listener():
+        """
+        listener for events on Kafka topic.
+        """
+        # Start up the server to expose the metrics at port number mention below.
+        start_http_server(8101, registry=KAFKA_REGISTERY)
+        kafka_consumer = KafkaConsumer(CONSUMER_CONFIG)
+        kafka_consumer.subscribe([KAFKA_TOPICS['labeled']])
+        while True:
+            # Poll with a 2 s timeout; None simply means no message arrived.
+            receive_msg = kafka_consumer.poll(2.0)
+            if receive_msg is None:
+                # print (" - Telemetry frontend listening on Kafka Topic: ", KAFKA_TOPICS['raw'])     # added for debugging purposes
+                continue
+            elif receive_msg.error():
+                # _PARTITION_EOF only signals the end of a partition, not a failure.
+                if receive_msg.error().code() == KafkaError._PARTITION_EOF:
+                    continue
+                else:
+                    print("Consumer error: {}".format(receive_msg.error()))
+                    continue
+            try:
+                new_event = receive_msg.value().decode('utf-8')
+                # print("New event on topic '{:}' is {:}".format(KAFKA_TOPICS['raw'], new_event))
+                # LOGGER.info("New event on topic '{:}' is {:}".format(KAFKA_TOPICS['raw'], new_event))
+                KpiWriter.write_metric_to_promtheus(new_event)
+            except Exception as e:
+                print(f"Error to consume event from topic: {KAFKA_TOPICS['labeled']}. Error detail:  {str(e)}")
+                continue
+
+    # send metric to Prometheus
+    @staticmethod
+    def write_metric_to_promtheus(event):
+        # NOTE(review): literal_eval assumes the payload is a Python dict repr,
+        # not JSON -- confirm the producer's serialization format.
+        event = ast.literal_eval(event)         # converted into dict
+        print("New recevied event: {:}".format(event))
+        event_kpi_name = event['kpi_description']
+        # Only KPIs pre-registered in create_prom_metrics_name() are exported;
+        # everything else is silently dropped.
+        if event_kpi_name in KPIs_TO_SEARCH:
+            PROM_METRICS[event_kpi_name].labels(
+                kpi_id          = event['kpi_id'],
+                kpi_sample_type = event['kpi_sample_type'],
+                device_id       = event['device_id'],
+                endpoint_id     = event['endpoint_id'],
+                service_id      = event['service_id'],
+                slice_id        = event['slice_id'],
+                connection_id   = event['connection_id'],
+                link_id         = event['link_id']
+            ).set(float(event['kpi_value']))
+        # Throttle slightly between events -- presumably to ease scraping load;
+        # TODO confirm this delay is still needed.
+        time.sleep(0.05)
+
+    @staticmethod
+    def create_prom_metrics_name():
+        # Pre-register one Gauge per KPI of interest so write_metric_to_promtheus
+        # can look them up by name without re-registering on every event.
+        metric_tags = ['kpi_id','kpi_sample_type','device_id',
+                       'endpoint_id','service_id','slice_id','connection_id','link_id']
+        for metric_key in KPIs_TO_SEARCH:
+            metric_name        = metric_key
+            metric_description = "description of " + str(metric_key)
+            try:
+                PROM_METRICS[metric_key] = Gauge ( 
+                    metric_name, metric_description, metric_tags, 
+                    registry=KAFKA_REGISTERY )
+                # print("Metric pushed to Prometheus: {:}".format(PROM_METRICS[metric_key]))
+            except ValueError as e:
+                if 'Duplicated timeseries' in str(e):
+                    print("Metric {:} is already registered. Skipping.".format(metric_name))
diff --git a/src/kpi_value_writer/service/MetricWriterToPrometheus.py b/src/kpi_value_writer/service/MetricWriterToPrometheus.py
new file mode 100644
index 0000000000000000000000000000000000000000..b681164786bd310d457998bae55b836522888b94
--- /dev/null
+++ b/src/kpi_value_writer/service/MetricWriterToPrometheus.py
@@ -0,0 +1,96 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# read Kafka stream from Kafka topic
+
+import ast
+import time
+import threading
+import logging
+from prometheus_client import start_http_server, Gauge, CollectorRegistry
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+
+from common.proto.kpi_value_api_pb2 import KpiValue
+from common.proto.kpi_manager_pb2 import KpiDescriptor
+
+LOGGER         = logging.getLogger(__name__)
+PROM_METRICS   = {}
+PROM_REGISTERY = CollectorRegistry()
+
+class MetricWriterToPrometheus:
+    '''
+    This class exposes the *cooked KPI* on the endpoint to be scraped by the Prometheus server.
+    cooked KPI value = KpiDescriptor (gRPC message) + KpiValue (gRPC message)
+    '''
+    def __init__(self):
+        # prometheus server address and configs
+        self.start_prometheus_client()
+        pass
+    
+    def start_prometheus_client(self):
+        start_http_server(10808, registry=PROM_REGISTERY)
+        LOGGER.debug("Prometheus client is started on port 10808")
+
+    def merge_kpi_descriptor_and_kpi_value(self, kpi_descriptor, kpi_value):
+            # Creating a dictionary from the kpi_descriptor's attributes
+            cooked_kpi = {
+                'kpi_id'         : kpi_descriptor.kpi_id.kpi_id.uuid,
+                'kpi_description': kpi_descriptor.kpi_description,
+                'kpi_sample_type': KpiSampleType.Name(kpi_descriptor.kpi_sample_type),
+                'device_id'      : kpi_descriptor.device_id.device_uuid.uuid,
+                'endpoint_id'    : kpi_descriptor.endpoint_id.endpoint_uuid.uuid,
+                'service_id'     : kpi_descriptor.service_id.service_uuid.uuid,
+                'slice_id'       : kpi_descriptor.slice_id.slice_uuid.uuid,
+                'connection_id'  : kpi_descriptor.connection_id.connection_uuid.uuid,
+                'link_id'        : kpi_descriptor.link_id.link_uuid.uuid,
+                'time_stamp'      : kpi_value.timestamp.timestamp,
+                'kpi_value'      : kpi_value.kpi_value_type.floatVal
+            }
+            # LOGGER.debug("Cooked Kpi: {:}".format(cooked_kpi))
+            return cooked_kpi
+
+    def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: KpiValue):
+        # merge both gRPC messages into single varible.
+        cooked_kpi = self.merge_kpi_descriptor_and_kpi_value(kpi_descriptor, kpi_value)
+        tags_to_exclude = {'kpi_description', 'kpi_sample_type', 'kpi_value'} # extracted values will be used as metric tag
+        metric_tags = [tag for tag in cooked_kpi.keys() if tag not in tags_to_exclude]
+        metric_name = cooked_kpi['kpi_sample_type']
+        try:
+            if metric_name not in PROM_METRICS:     # Only register the metric, when it doesn't exists
+                PROM_METRICS[metric_name] = Gauge ( 
+                    metric_name,
+                    cooked_kpi['kpi_description'],
+                    metric_tags,
+                    registry=PROM_REGISTERY
+                )
+            LOGGER.debug("Metric is created with labels: {:}".format(metric_tags))
+            PROM_METRICS[metric_name].labels(
+                    kpi_id          = cooked_kpi['kpi_id'],
+                    device_id       = cooked_kpi['device_id'],
+                    endpoint_id     = cooked_kpi['endpoint_id'],
+                    service_id      = cooked_kpi['service_id'],
+                    slice_id        = cooked_kpi['slice_id'],
+                    connection_id   = cooked_kpi['connection_id'],
+                    link_id         = cooked_kpi['link_id'],
+                    time_stamp      = cooked_kpi['time_stamp'],
+                ).set(float(cooked_kpi['kpi_value']))
+            LOGGER.debug("Metric pushed to the endpoints: {:}".format(PROM_METRICS[metric_name]))
+
+        except ValueError as e:
+            if 'Duplicated timeseries' in str(e):
+                LOGGER.debug("Metric {:} is already registered. Skipping.".format(metric_name))
+                print("Metric {:} is already registered. Skipping.".format(metric_name))
+            else:
+                LOGGER.error("Error while pushing metric: {}".format(e))
+                raise
\ No newline at end of file
diff --git a/src/kpi_value_writer/service/__init__.py b/src/kpi_value_writer/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_writer/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_writer/service/__main__.py b/src/kpi_value_writer/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa67540fb899781297d1235dc2e15bcbb2c38585
--- /dev/null
+++ b/src/kpi_value_writer/service/__main__.py
@@ -0,0 +1,51 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from kpi_value_writer.service.KpiValueWriter import KpiValueWriter
+from common.Settings import get_log_level
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level)
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.debug('Starting...')
+
+    grpc_service = KpiValueWriter()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.debug('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.debug('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/kpi_value_writer/tests/test_kpi_value_writer.py b/src/kpi_value_writer/tests/test_kpi_value_writer.py
new file mode 100755
index 0000000000000000000000000000000000000000..572495d48d70cdc40c0ef6bb1efcf877e2a610ee
--- /dev/null
+++ b/src/kpi_value_writer/tests/test_kpi_value_writer.py
@@ -0,0 +1,52 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from kpi_value_writer.service.KpiValueWriter import KpiValueWriter
+from common.tools.kafka.Variables import KafkaTopic
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+from kpi_manager.tests.test_messages import create_kpi_descriptor_request
+from common.proto.kpi_manager_pb2 import KpiDescriptor
+from kpi_value_writer.tests.test_messages import create_kpi_id_request
+
+LOGGER = logging.getLogger(__name__)
+
+# def test_GetKpiDescriptor():
+#     LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ")
+#     kpi_manager_client = KpiManagerClient()
+#     # adding KPI
+#     LOGGER.info(" --->>> calling SetKpiDescriptor ")
+#     response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     # get KPI
+#     LOGGER.info(" --->>> calling GetKpiDescriptor with response ID")
+#     response = kpi_manager_client.GetKpiDescriptor(response_id)
+#     LOGGER.info("Response gRPC message object: {:}".format(response))
+    
+#     LOGGER.info(" --->>> calling GetKpiDescriptor with random ID")
+#     rand_response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request())
+#     LOGGER.info("Response gRPC message object: {:}".format(rand_response))
+
+#     LOGGER.info("\n------------------ TEST FINISHED ---------------------\n")
+#     assert isinstance(response, KpiDescriptor)
+
+# -------- Initial Test ----------------
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+def test_KafkaConsumer():
+    # Fire-and-forget smoke test: spawns the consumer thread; no assertion is
+    # made because the consumer loops forever reading from Kafka.
+    LOGGER.debug(" --->>> test_kafka_consumer: START <<<--- ")
+    KpiValueWriter.RunKafkaConsumer()
+
diff --git a/src/kpi_value_writer/tests/test_messages.py b/src/kpi_value_writer/tests/test_messages.py
new file mode 100755
index 0000000000000000000000000000000000000000..89a41fa08ad37b7d9b305bba6e7c445fea5cd18a
--- /dev/null
+++ b/src/kpi_value_writer/tests/test_messages.py
@@ -0,0 +1,44 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid, time
+import random
+from common.proto import kpi_manager_pb2
+from common.proto.kpi_value_api_pb2 import KpiValue
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+
+def create_kpi_id_request():
+    _create_kpi_id = kpi_manager_pb2.KpiId()
+    _create_kpi_id.kpi_id.uuid = str(uuid.uuid4())
+    return _create_kpi_id
+
+def create_kpi_descriptor_request(description: str = "Test Description"):
+    _create_kpi_request                                    = kpi_manager_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_id.kpi_id.uuid                 = str(uuid.uuid4())
+    _create_kpi_request.kpi_description                    = description
+    _create_kpi_request.kpi_sample_type                    = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    _create_kpi_request.device_id.device_uuid.uuid         = 'DEV4'  
+    _create_kpi_request.service_id.service_uuid.uuid       = 'SERV3' 
+    _create_kpi_request.slice_id.slice_uuid.uuid           = 'SLC3'  
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid     = 'END2'  
+    _create_kpi_request.connection_id.connection_uuid.uuid = 'CON2'  
+    _create_kpi_request.link_id.link_uuid.uuid             = 'LNK2'  
+    return _create_kpi_request
+
+def create_kpi_value_request():
+    _create_kpi_value_request                         = KpiValue()
+    _create_kpi_value_request.kpi_id.kpi_id.uuid      = str(uuid.uuid4())
+    _create_kpi_value_request.timestamp.timestamp     = time.time()
+    _create_kpi_value_request.kpi_value_type.floatVal = random.randint(10, 10000)
+    return _create_kpi_value_request
diff --git a/src/kpi_value_writer/tests/test_metric_writer_to_prom.py b/src/kpi_value_writer/tests/test_metric_writer_to_prom.py
new file mode 100644
index 0000000000000000000000000000000000000000..f60e96253ae8edb29eedcbe2d6e66aaeb450229c
--- /dev/null
+++ b/src/kpi_value_writer/tests/test_metric_writer_to_prom.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import threading
+import logging
+from kpi_value_writer.service.MetricWriterToPrometheus import MetricWriterToPrometheus
+from kpi_value_writer.tests.test_messages import create_kpi_descriptor_request, create_kpi_value_request
+
+LOGGER = logging.getLogger(__name__)
+
+def test_metric_writer_to_prometheus():
+    LOGGER.info(' >>> test_metric_writer_to_prometheus START <<< ')
+    metric_writer_obj = MetricWriterToPrometheus()
+    metric_writer_obj.create_and_expose_cooked_kpi(
+                        create_kpi_descriptor_request(),
+                        create_kpi_value_request()
+        )
diff --git a/src/telemetry/README.md b/src/telemetry/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..da43bd471c384ae9133871a097e94043f70ed7de
--- /dev/null
+++ b/src/telemetry/README.md
@@ -0,0 +1,10 @@
+# How to locally run and test Telemetry service
+
+### Pre-requisites
+The following requirements should be fulfilled before the execution of the Telemetry service.
+
+1. Verify that the [telmetry_frontend.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/telemetry_frontend.proto) file exists and the gRPC stub files are generated successfully. 
+2. A virtual environment exists with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/telemetry_virenv.txt) installed successfully.
+3. verify the creation of required database and table.
+[DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/database/tests/managementDBtests.py) python file enlist the functions to create tables and database.
+[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_manager/service/database/KpiEngine.py) contains the DB string, update the string as per your deployment.
diff --git a/src/telemetry/__init__.py b/src/telemetry/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..234a1af6588c91f6a17f3963f69120cd6e2248d9
--- /dev/null
+++ b/src/telemetry/__init__.py
@@ -0,0 +1,15 @@
+
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/backend/__init__.py b/src/telemetry/backend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/telemetry/backend/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py
new file mode 100755
index 0000000000000000000000000000000000000000..d81be79dbe410ccbf2781816f34735f6bfe5639d
--- /dev/null
+++ b/src/telemetry/backend/service/TelemetryBackendService.py
@@ -0,0 +1,253 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ast
+import time
+import random
+import logging
+import requests
+import threading
+from typing import Any, Tuple
+from common.proto.context_pb2 import Empty
+from confluent_kafka import Producer as KafkaProducer
+from confluent_kafka import Consumer as KafkaConsumer
+from confluent_kafka import KafkaException
+from confluent_kafka import KafkaError
+from confluent_kafka.admin import AdminClient, NewTopic
+from common.proto.telemetry_frontend_pb2 import Collector, CollectorId
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+
+LOGGER             = logging.getLogger(__name__)
+METRICS_POOL       = MetricsPool('Telemetry', 'TelemetryBackend')
+KAFKA_SERVER_IP    = '127.0.0.1:9092'
+# KAFKA_SERVER_IP    = '10.152.183.175:30092'
+ADMIN_KAFKA_CLIENT = AdminClient({'bootstrap.servers': KAFKA_SERVER_IP})
+KAFKA_TOPICS       = {'request' : 'topic_request', 'response': 'topic_response',
+                      'raw'     : 'topic_raw'    , 'labeled' : 'topic_labeled'}
+EXPORTER_ENDPOINT  = "http://10.152.183.2:9100/metrics"
+PRODUCER_CONFIG    = {'bootstrap.servers': KAFKA_SERVER_IP,}
+
+
+class TelemetryBackendService:
+    """
+    Class to listens for request on Kafka topic, fetches metrics and produces measured values to another Kafka topic.
+    """
+
+    def __init__(self):
+        LOGGER.info('Init TelemetryBackendService')
+        self.running_threads = {}
+    
+    def run_kafka_listener(self)->bool:
+        threading.Thread(target=self.kafka_listener).start()
+        return True        
+    
+    def kafka_listener(self):
+        """
+        listener for requests on Kafka topic.
+        """
+        consumer_configs = {
+            'bootstrap.servers' : KAFKA_SERVER_IP,
+            'group.id'          : 'backend',
+            'auto.offset.reset' : 'latest'
+        }
+        # topic_request = "topic_request"
+        consumerObj = KafkaConsumer(consumer_configs)
+        # consumerObj.subscribe([topic_request])
+        consumerObj.subscribe([KAFKA_TOPICS['request']])
+
+        while True:
+            receive_msg = consumerObj.poll(2.0)
+            if receive_msg is None:
+                # print (time.time(), " - Telemetry backend is listening on Kafka Topic: ", KAFKA_TOPICS['request'])     # added for debugging purposes
+                continue
+            elif receive_msg.error():
+                if receive_msg.error().code() == KafkaError._PARTITION_EOF:
+                    continue
+                else:
+                    print("Consumer error: {}".format(receive_msg.error()))
+                    break
+            (kpi_id, duration, interval) = ast.literal_eval(receive_msg.value().decode('utf-8'))
+            collector_id = receive_msg.key().decode('utf-8')
+            if duration == -1 and interval == -1:
+                self.terminate_collector_backend(collector_id)
+                # threading.Thread(target=self.terminate_collector_backend, args=(collector_id))
+            else:
+                self.run_initiate_collector_backend(collector_id, kpi_id, duration, interval)
+
+
+    def run_initiate_collector_backend(self, collector_id: str, kpi_id: str, duration: int, interval: int):
+        stop_event = threading.Event()
+        thread = threading.Thread(target=self.initiate_collector_backend, 
+                                  args=(collector_id, kpi_id, duration, interval, stop_event))
+        self.running_threads[collector_id] = (thread, stop_event)
+        thread.start()
+
+    def initiate_collector_backend(self, collector_id, kpi_id, duration, interval, stop_event
+                        ): # type: ignore
+        """
+        Collector loop: emits one KPI sample every 'interval' seconds until 'duration' elapses or 'stop_event' is set.
+        """
+        print("Initiating backend for collector: ", collector_id)
+        start_time = time.time()
+        while not stop_event.is_set():
+            if time.time() - start_time >= duration:            # condition to terminate backend
+                print("Execuation duration completed: Terminating backend: Collector Id: ", collector_id, " - ", time.time() - start_time)
+                self.generate_kafka_response(collector_id, "-1", -1)
+                # write to Kafka to send the termination confirmation.
+                break
+            # print ("Received KPI: ", kpi_id, ", Duration: ", duration, ", Fetch Interval: ", interval)
+            self.extract_kpi_value(collector_id, kpi_id)
+            # print ("Telemetry Backend running for KPI: ", kpi_id, "after FETCH INTERVAL: ", interval)
+            time.sleep(interval)
+
+    def extract_kpi_value(self, collector_id: str, kpi_id: str):
+        """
+        Method to extract kpi value.
+        """
+        measured_kpi_value = random.randint(1,100)                  # Should be extracted from exporter/stream
+        # measured_kpi_value = self.fetch_node_exporter_metrics()     # exporter extracted metric value against default KPI
+        self.generate_kafka_response(collector_id, kpi_id , measured_kpi_value)
+
+    def generate_kafka_response(self, collector_id: str, kpi_id: str, kpi_value: Any):
+        """
+        Method to write response on Kafka topic
+        """
+        # topic_response = "topic_response"
+        msg_value : Tuple [str, Any] = (kpi_id, kpi_value)
+        msg_key    = collector_id
+        producerObj = KafkaProducer(PRODUCER_CONFIG)
+        # producerObj.produce(topic_response, key=msg_key, value= str(msg_value), callback=self.delivery_callback)
+        producerObj.produce(KAFKA_TOPICS['response'], key=msg_key, value= str(msg_value), callback=TelemetryBackendService.delivery_callback)
+        producerObj.flush()
+
+    def terminate_collector_backend(self, collector_id):
+        if collector_id in self.running_threads:
+            thread, stop_event = self.running_threads[collector_id]
+            stop_event.set()
+            thread.join()
+            print ("Terminating backend (by StopCollector): Collector Id: ", collector_id)
+            del self.running_threads[collector_id]
+            self.generate_kafka_response(collector_id, "-1", -1)
+
+    def create_topic_if_not_exists(self, new_topics: list) -> bool:
+        """
+        Method to create Kafka topic if it does not exist.
+        Args:
+            admin_client (AdminClient): Kafka admin client.
+        """
+        for topic in new_topics:
+            try:
+                topic_metadata = ADMIN_KAFKA_CLIENT.list_topics(timeout=5)
+                if topic not in topic_metadata.topics:
+                    # If the topic does not exist, create a new topic
+                    print(f"Topic '{topic}' does not exist. Creating...")
+                    LOGGER.warning("Topic {:} does not exist. Creating...".format(topic))
+                    new_topic = NewTopic(topic, num_partitions=1, replication_factor=1)
+                    ADMIN_KAFKA_CLIENT.create_topics([new_topic])
+            except KafkaException as e:
+                print(f"Failed to create topic: {e}")
+                return False
+        return True
+
+    @staticmethod
+    def delivery_callback( err, msg):
+        """
+        Callback function to handle message delivery status.
+        Args:
+            err (KafkaError): Kafka error object.
+            msg (Message): Kafka message object.
+        """
+        if err:
+            print(f'Message delivery failed: {err}')
+        else:
+            print(f'Message delivered to topic {msg.topic()}')
+
+# ----------- BELOW: Actual Implementation of Kafka Producer with Node Exporter -----------
+    @staticmethod
+    def fetch_single_node_exporter_metric():
+        """
+        Method to fetch metrics from Node Exporter.
+        Returns:
+            str: Metrics fetched from Node Exporter.
+        """
+        KPI = "node_network_receive_packets_total"
+        try:
+            response = requests.get(EXPORTER_ENDPOINT) # type: ignore
+            LOGGER.info("Request status {:}".format(response))
+            if response.status_code == 200:
+                # print(f"Metrics fetched sucessfully...")
+                metrics = response.text
+                # Check if the desired metric is available in the response
+                if KPI in metrics:
+                    KPI_VALUE = TelemetryBackendService.extract_metric_value(metrics, KPI)
+                    # Extract the metric value
+                    if KPI_VALUE is not None:
+                        LOGGER.info("Extracted value of {:} is {:}".format(KPI, KPI_VALUE))
+                        print(f"Extracted value of {KPI} is: {KPI_VALUE}")
+                        return KPI_VALUE
+            else:
+                LOGGER.info("Failed to fetch metrics. Status code: {:}".format(response.status_code))
+                # print(f"Failed to fetch metrics. Status code: {response.status_code}")
+                return None
+        except Exception as e:
+            LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e))
+            # print(f"Failed to fetch metrics: {str(e)}")
+            return None
+
+    @staticmethod
+    def extract_metric_value(metrics, metric_name):
+        """
+        Method to extract the value of a metric from the metrics string.
+        Args:
+            metrics (str): Metrics string fetched from Exporter.
+            metric_name (str): Name of the metric to extract.
+        Returns:
+            float: Value of the extracted metric, or None if not found.
+        """
+        try:
+            # Find the metric line containing the desired metric name
+            metric_line = next(line for line in metrics.split('\n') if line.startswith(metric_name))
+            # Split the line to extract the metric value
+            metric_value = float(metric_line.split()[1])
+            return metric_value
+        except StopIteration:
+            print(f"Metric '{metric_name}' not found in the metrics.")
+            return None
+
+    @staticmethod
+    def stream_node_export_metrics_to_raw_topic():
+        try:
+            while True:
+                response = requests.get(EXPORTER_ENDPOINT)
+                # print("Response Status {:} ".format(response))
+                # LOGGER.info("Response Status {:} ".format(response))
+                try: 
+                    if response.status_code == 200:
+                        producerObj = KafkaProducer(PRODUCER_CONFIG)
+                        producerObj.produce(KAFKA_TOPICS['raw'], key="raw", value= str(response.text), callback=TelemetryBackendService.delivery_callback)
+                        producerObj.flush()
+                        LOGGER.info("Produce to topic")
+                    else:
+                        LOGGER.info("Didn't received expected response. Status code: {:}".format(response.status_code))
+                        print(f"Didn't received expected response. Status code: {response.status_code}")
+                        return None
+                    time.sleep(15)
+                except Exception as e:
+                    LOGGER.info("Failed to process response. Status code: {:}".format(e))
+                    return None
+        except Exception as e:
+            LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e))
+            print(f"Failed to fetch metrics: {str(e)}")
+            return None
+# ----------- ABOVE: Actual Implementation of Kafka Producer with Node Exporter -----------
\ No newline at end of file
diff --git a/src/telemetry/backend/service/__init__.py b/src/telemetry/backend/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/telemetry/backend/service/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/telemetry/backend/tests/__init__.py b/src/telemetry/backend/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/telemetry/backend/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/telemetry/backend/tests/messagesBackend.py b/src/telemetry/backend/tests/messagesBackend.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cf553eaaec41de7599b6723e31e4ca3f82cbcae
--- /dev/null
+++ b/src/telemetry/backend/tests/messagesBackend.py
@@ -0,0 +1,15 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
diff --git a/src/telemetry/backend/tests/testTelemetryBackend.py b/src/telemetry/backend/tests/testTelemetryBackend.py
new file mode 100644
index 0000000000000000000000000000000000000000..d832e54e77589ca677682760d19e68b1bd09b1f7
--- /dev/null
+++ b/src/telemetry/backend/tests/testTelemetryBackend.py
@@ -0,0 +1,53 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+print (sys.path)
+sys.path.append('/home/tfs/tfs-ctrl')
+import threading
+import logging
+from typing import Tuple
+# from common.proto.context_pb2 import Empty
+from src.telemetry.backend.service.TelemetryBackendService import TelemetryBackendService
+
+LOGGER = logging.getLogger(__name__)
+
+
+###########################
+# Tests Implementation of Telemetry Backend
+###########################
+
+def test_verify_kafka_topics():
+    LOGGER.info('test_verify_kafka_topics requesting')
+    TelemetryBackendServiceObj = TelemetryBackendService()
+    KafkaTopics = ['topic_request', 'topic_response', 'topic_raw', 'topic_labeled']  # must match KAFKA_TOPICS in TelemetryBackendService
+    response = TelemetryBackendServiceObj.create_topic_if_not_exists(KafkaTopics)
+    LOGGER.debug(str(response))
+    assert isinstance(response, bool)
+
+# def test_run_kafka_listener():
+#     LOGGER.info('test_receive_kafka_request requesting')
+#     TelemetryBackendServiceObj = TelemetryBackendService()
+#     response = TelemetryBackendServiceObj.run_kafka_listener()
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, bool)
+
+# def test_fetch_node_exporter_metrics():
+#     LOGGER.info(' >>> test_fetch_node_exporter_metrics START <<< ')
+#     TelemetryBackendService.fetch_single_node_exporter_metric()
+
+def test_stream_node_export_metrics_to_raw_topic():
+    LOGGER.info(' >>> test_stream_node_export_metrics_to_raw_topic START <<< ')
+    threading.Thread(target=TelemetryBackendService.stream_node_export_metrics_to_raw_topic, args=()).start()
+
diff --git a/src/telemetry/database/TelemetryDBmanager.py b/src/telemetry/database/TelemetryDBmanager.py
new file mode 100644
index 0000000000000000000000000000000000000000..b558180a9e1fbf85bf523c7faededf58f57e2264
--- /dev/null
+++ b/src/telemetry/database/TelemetryDBmanager.py
@@ -0,0 +1,248 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, time
+import sqlalchemy
+from sqlalchemy import inspect, MetaData, Table
+from sqlalchemy.orm import sessionmaker
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+from telemetry.database.TelemetryModel import Kpi as KpiModel
+from sqlalchemy.ext.declarative import declarative_base
+from telemetry.database.TelemetryEngine import TelemetryEngine
+from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId
+from common.proto.telemetry_frontend_pb2 import Collector, CollectorId
+from sqlalchemy.exc import SQLAlchemyError
+from telemetry.database.TelemetryModel import Base
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "telemetryfrontend"
+
+class TelemetryDBmanager:
+    def __init__(self):
+        self.db_engine = TelemetryEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            raise RuntimeError('Unable to get SQLAlchemy DB Engine')  # __init__ must return None; returning False raises TypeError
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
+    def create_database(self):
+        try:
+            # with self.db_engine.connect() as connection:
+            #     connection.execute(f"CREATE DATABASE {self.db_name};")
+            TelemetryEngine.create_database(self.db_engine)
+            LOGGER.info('TelemetryDBmanager initalized DB Name: {:}'.format(self.db_name))
+            return True
+        except Exception as e: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to check/create the database: {:s}'.format(str(e)))
+            return False
+
+    def create_tables(self):
+        try:
+            Base.metadata.create_all(self.db_engine)     # type: ignore
+            LOGGER.info("Tables created in database ({:}) the as per Models".format(self.db_name))
+        except Exception as e:
+            LOGGER.info("Tables cannot be created in the TelemetryFrontend database. {:s}".format(str(e)))
+
+    def verify_tables(self):
+        try:
+            with self.db_engine.connect() as connection:
+                result = connection.execute("SHOW TABLES;")
+                tables = result.fetchall()
+                LOGGER.info("Tables in DB: {:}".format(tables))
+        except Exception as e:
+            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
+
+    def drop_table(self, table_to_drop: str):
+        try:
+            inspector = inspect(self.db_engine)
+            existing_tables = inspector.get_table_names()
+            if table_to_drop in existing_tables:
+                table = Table(table_to_drop, MetaData(), autoload_with=self.db_engine)
+                table.drop(self.db_engine)
+                LOGGER.info("Tables delete in the DB Name: {:}".format(self.db_name))
+            else:
+                LOGGER.warning("No table {:} in database {:} ".format(table_to_drop, DB_NAME))
+        except Exception as e:
+            LOGGER.info("Tables cannot be deleted in the {:} database. {:s}".format(DB_NAME, str(e)))
+
+    def list_databases(self):
+        query = "SHOW DATABASES"
+        with self.db_engine.connect() as connection:
+            result = connection.execute(query)
+            databases = [row[0] for row in result]
+        LOGGER.info("List of available DBs: {:}".format(databases))
+        
+# ------------------ INSERT METHODs --------------------------------------
+
+    def inser_kpi(self, request: KpiDescriptor):
+        session = self.Session()
+        try:
+            # Create a new Kpi instance; map each column to its matching request field
+            kpi_to_insert                 = KpiModel()
+            kpi_to_insert.kpi_id          = request.kpi_id.kpi_id.uuid
+            kpi_to_insert.kpi_description = request.kpi_description
+            kpi_to_insert.kpi_sample_type = request.kpi_sample_type
+            kpi_to_insert.device_id       = request.device_id.device_uuid.uuid
+            kpi_to_insert.endpoint_id     = request.endpoint_id.endpoint_uuid.uuid
+            kpi_to_insert.service_id      = request.service_id.service_uuid.uuid
+            kpi_to_insert.slice_id        = request.slice_id.slice_uuid.uuid
+            kpi_to_insert.connection_id   = request.connection_id.connection_uuid.uuid
+            # kpi_to_insert.link_id         = request.link_id.link_id.uuid
+            # Add the instance to the session
+            session.add(kpi_to_insert)
+            session.commit()
+            LOGGER.info("Row inserted into kpi table: {:}".format(kpi_to_insert.kpi_id))
+        except Exception as e:
+            session.rollback()
+            LOGGER.info("Failed to insert new kpi. {:s}".format(str(e)))
+        finally:
+            # Close the session
+            session.close()
+
+    # Function to insert a row into the Collector model
+    def insert_collector(self, request: Collector):
+        session = self.Session()
+        try:
+            # Create a new Collector instance
+            collector_to_insert                          = CollectorModel()
+            collector_to_insert.collector_id             = request.collector_id.collector_id.uuid
+            collector_to_insert.kpi_id                   = request.kpi_id.kpi_id.uuid
+            collector_to_insert.collector_decription     = "Test collector description"  # model column name (misspelled in schema)
+            collector_to_insert.sampling_duration_s      = request.duration_s
+            collector_to_insert.sampling_interval_s      = request.interval_s
+            collector_to_insert.start_timestamp          = time.time()
+            collector_to_insert.end_timestamp            = time.time()
+            
+            session.add(collector_to_insert)
+            session.commit()
+            LOGGER.info("Row inserted into collector table: {:}".format(collector_to_insert.collector_id))
+        except Exception as e:
+            session.rollback()
+            LOGGER.info("Failed to insert new collector. {:s}".format(str(e)))
+        finally:
+            # Close the session
+            session.close()
+
+# ------------------ GET METHODs --------------------------------------
+
+    def get_kpi_descriptor(self, request: KpiId):
+        session = self.Session()
+        try:
+            kpi_id_to_search = request.kpi_id.uuid
+            kpi = session.query(KpiModel).filter_by(kpi_id=kpi_id_to_search).first()
+            if kpi:
+                LOGGER.info("kpi ID found: {:s}".format(str(kpi)))
+                return kpi
+            else:
+                LOGGER.warning("Kpi ID not found {:s}".format(str(kpi_id_to_search)))
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.info("Failed to retrieve KPI ID. {:s}".format(str(e)))
+            raise
+        finally:
+            session.close()
+
+    def get_collector(self, request: CollectorId):
+        session = self.Session()
+        try:
+            collector_id_to_search = request.collector_id.uuid
+            collector = session.query(CollectorModel).filter_by(collector_id=collector_id_to_search).first()
+            if collector:
+                LOGGER.info("collector ID found: {:s}".format(str(collector)))
+                return collector
+            else:
+                LOGGER.warning("collector ID not found{:s}".format(str(collector_id_to_search)))
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.info("Failed to retrieve collector ID. {:s}".format(str(e)))
+            raise
+        finally:
+            session.close()
+    
+    # ------------------ SELECT METHODs --------------------------------------
+
+    def select_kpi_descriptor(self, **filters):
+        session = self.Session()
+        try:
+            query = session.query(KpiModel)
+            for column, value in filters.items():
+                query = query.filter(getattr(KpiModel, column) == value)
+            result = query.all()
+            if len(result) != 0:
+                LOGGER.info("Fetched filtered rows from KPI table with filters : {:s}".format(str(result)))
+            else:
+                LOGGER.warning("No matching row found : {:s}".format(str(result)))
+            return result
+        except SQLAlchemyError as e:
+            LOGGER.error("Error fetching filtered rows from KPI table with filters {:}: {:}".format(filters, e))
+            return []
+        finally:
+            session.close()
+    
+    def select_collector(self, **filters):
+        session = self.Session()
+        try:
+            query = session.query(CollectorModel)
+            for column, value in filters.items():
+                query = query.filter(getattr(CollectorModel, column) == value)
+            result = query.all()
+            if len(result) != 0:
+                LOGGER.info("Fetched filtered rows from KPI table with filters : {:s}".format(str(result)))
+            else:
+                LOGGER.warning("No matching row found : {:s}".format(str(result)))            
+            return result
+        except SQLAlchemyError as e:
+            LOGGER.error("Error fetching filtered rows from KPI table with filters {:}: {:}".format(filters, e))
+            return []
+        finally:
+            session.close()
+
+# ------------------ DELETE METHODs --------------------------------------
+
+    def delete_kpi_descriptor(self, request: KpiId):
+        session = self.Session()
+        try:
+            kpi_id_to_delete = request.kpi_id.uuid
+            kpi = session.query(KpiModel).filter_by(kpi_id=kpi_id_to_delete).first()
+            if kpi:
+                session.delete(kpi)
+                session.commit()
+                LOGGER.info("Deleted KPI with kpi_id: %s", kpi_id_to_delete)
+            else:
+                LOGGER.warning("KPI with kpi_id %s not found", kpi_id_to_delete)
+        except SQLAlchemyError as e:
+            session.rollback()
+            LOGGER.error("Error deleting KPI with kpi_id %s: %s", kpi_id_to_delete, e)
+        finally:
+            session.close()
+
+    def delete_collector(self, request: CollectorId):
+        session = self.Session()
+        try:
+            collector_id_to_delete = request.collector_id.uuid
+            collector = session.query(CollectorModel).filter_by(collector_id=collector_id_to_delete).first()
+            if collector:
+                session.delete(collector)
+                session.commit()
+                LOGGER.info("Deleted collector with collector_id: %s", collector_id_to_delete)
+            else:
+                LOGGER.warning("collector with collector_id %s not found", collector_id_to_delete)
+        except SQLAlchemyError as e:
+            session.rollback()
+            LOGGER.error("Error deleting collector with collector_id %s: %s", collector_id_to_delete, e)
+        finally:
+            session.close()
\ No newline at end of file
diff --git a/src/telemetry/database/TelemetryEngine.py b/src/telemetry/database/TelemetryEngine.py
new file mode 100644
index 0000000000000000000000000000000000000000..a563fa09f94c812aed07d0aa3cbd5bc988737fc4
--- /dev/null
+++ b/src/telemetry/database/TelemetryEngine.py
@@ -0,0 +1,59 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, sqlalchemy, sqlalchemy_utils
+# from common.Settings import get_setting
+
+LOGGER = logging.getLogger(__name__)
+
+APP_NAME = 'tfs'
+ECHO = False                # False: No dump SQL commands and transactions executed
+CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}'
+# CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
+
+class TelemetryEngine:
+    # def __init__(self):
+    #     self.engine = self.get_engine()
+    @staticmethod
+    def get_engine() -> sqlalchemy.engine.Engine:
+        CRDB_NAMESPACE = "crdb"
+        CRDB_SQL_PORT  = "26257"
+        CRDB_DATABASE  = "telemetryfrontend"
+        CRDB_USERNAME  = "tfs"
+        CRDB_PASSWORD  = "tfs123"
+        CRDB_SSLMODE   = "require"
+        crdb_uri = CRDB_URI_TEMPLATE.format(
+                CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+        # crdb_uri = CRDB_URI_TEMPLATE.format(
+        #         CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+        try:
+            # engine = sqlalchemy.create_engine(
+            #     crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True)
+            engine = sqlalchemy.create_engine(crdb_uri, echo=False)
+            LOGGER.info(' TelemetryDBmanager initalized with DB URL: {:}'.format(crdb_uri))
+        except: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
+            return None # type: ignore
+        return engine # type: ignore
+
+    @staticmethod
+    def create_database(engine : sqlalchemy.engine.Engine) -> None:
+        if not sqlalchemy_utils.database_exists(engine.url):
+            LOGGER.info("Database created. {:}".format(engine.url))
+            sqlalchemy_utils.create_database(engine.url)
+
+    @staticmethod
+    def drop_database(engine : sqlalchemy.engine.Engine) -> None:
+        if sqlalchemy_utils.database_exists(engine.url):
+            sqlalchemy_utils.drop_database(engine.url)
diff --git a/src/telemetry/database/TelemetryModel.py b/src/telemetry/database/TelemetryModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..be4f0969c86638520cf226b8e42db90426165804
--- /dev/null
+++ b/src/telemetry/database/TelemetryModel.py
@@ -0,0 +1,45 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy import Column, Integer, String, Float, Text, ForeignKey
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker, relationship
+from sqlalchemy.orm import registry
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+# Create a base class for declarative models
+Base = registry().generate_base()
+# Base = declarative_base()
+    
+class Collector(Base):
+    __tablename__ = 'collector'
+
+    collector_id         = Column(UUID(as_uuid=False), primary_key=True)
+    kpi_id               = Column(UUID(as_uuid=False))
+    collector_decription = Column(String)
+    sampling_duration_s  = Column(Float)
+    sampling_interval_s  = Column(Float)
+    start_timestamp      = Column(Float)
+    end_timestamp        = Column(Float)
+
+
+    def __repr__(self):
+        return (f"<Collector(collector_id='{self.collector_id}', kpi_id='{self.kpi_id}', "
+                f"collector='{self.collector_decription}', sampling_duration_s='{self.sampling_duration_s}', "
+                f"sampling_interval_s='{self.sampling_interval_s}', start_timestamp='{self.start_timestamp}', "
+                f"end_timestamp='{self.end_timestamp}')>")
\ No newline at end of file
diff --git a/src/telemetry/database/__init__.py b/src/telemetry/database/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/telemetry/database/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/database/__main__.py b/src/telemetry/database/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cf553eaaec41de7599b6723e31e4ca3f82cbcae
--- /dev/null
+++ b/src/telemetry/database/__main__.py
@@ -0,0 +1,15 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
diff --git a/src/telemetry/database/managementDB.py b/src/telemetry/database/managementDB.py
new file mode 100644
index 0000000000000000000000000000000000000000..f79126f279d7bbece6c08ae5eb1cd74e340d1c7d
--- /dev/null
+++ b/src/telemetry/database/managementDB.py
@@ -0,0 +1,138 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, time
+import sqlalchemy
+import sqlalchemy_utils
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+from telemetry.database.TelemetryEngine import TelemetryEngine
+from telemetry.database.TelemetryModel import Base
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "telemetryfrontend"
+
+# # Create a base class for declarative models
+# Base = declarative_base()
+
+class managementDB:
+    def __init__(self):
+        self.db_engine = TelemetryEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            raise RuntimeError('Unable to get SQLAlchemy DB Engine')  # __init__ must not return a value
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
+    @staticmethod
+    def create_database(engine : sqlalchemy.engine.Engine) -> None:
+        if not sqlalchemy_utils.database_exists(engine.url):
+            LOGGER.info("Database created. {:}".format(engine.url))
+            sqlalchemy_utils.create_database(engine.url)
+
+    @staticmethod
+    def drop_database(engine : sqlalchemy.engine.Engine) -> None:
+        if sqlalchemy_utils.database_exists(engine.url):
+            sqlalchemy_utils.drop_database(engine.url)
+
+    # def create_database(self):
+    #     try:
+    #         with self.db_engine.connect() as connection:
+    #             connection.execute(f"CREATE DATABASE {self.db_name};")
+    #         LOGGER.info('managementDB initalizes database. Name: {self.db_name}')
+    #         return True
+    #     except: 
+    #         LOGGER.exception('Failed to check/create the database: {:s}'.format(str(self.db_engine.url)))
+    #         return False
+    
+    @staticmethod
+    def create_tables(engine : sqlalchemy.engine.Engine):
+        try:
+            Base.metadata.create_all(engine)     # type: ignore
+            LOGGER.info("Tables created in the DB Name: {:}".format(DB_NAME))
+        except Exception as e:
+            LOGGER.info("Tables cannot be created in the TelemetryFrontend database. {:s}".format(str(e)))
+
+    def verify_tables(self):
+        try:
+            with self.db_engine.connect() as connection:
+                result = connection.execute("SHOW TABLES;")
+                tables = result.fetchall()      # type: ignore
+                LOGGER.info("Tables verified: {:}".format(tables))
+        except Exception as e:
+            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
+
+    # NOTE: instance method (not static) — it relies on self.Session created in __init__.
+    def add_row_to_db(self, row):
+        session = self.Session()
+        try:
+            session.add(row)
+            session.commit()
+            LOGGER.info(f"Row inserted into {row.__class__.__name__} table.")
+        except Exception as e:
+            session.rollback()
+            LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
+        finally:
+            session.close()
+    
+    def search_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if entity:
+                LOGGER.info(f"{model.__name__} ID found: {str(entity)}")
+                return entity
+            else:
+                LOGGER.warning(f"{model.__name__} ID not found: {str(id_to_search)}")
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.info(f"Failed to retrieve {model.__name__} ID. {str(e)}")
+            raise
+        finally:
+            session.close()
+    
+    def delete_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            record = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if record:
+                session.delete(record)
+                session.commit()
+                LOGGER.info("Deleted %s with %s: %s", model.__name__, col_name, id_to_search)
+            else:
+                LOGGER.warning("%s with %s %s not found", model.__name__, col_name, id_to_search)
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e)
+        finally:
+            session.close()
+    
+    def select_with_filter(self, model, **filters):
+        session = self.Session()
+        try:
+            query = session.query(model)
+            for column, value in filters.items():
+                query = query.filter(getattr(model, column) == value) # type: ignore   
+            result = query.all()
+            if result:
+                LOGGER.info(f"Fetched filtered rows from {model.__name__} table with filters: {filters}") #  - Results: {result}
+            else:
+                LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filters}")
+            return result
+        except Exception as e:
+            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filters} ::: {e}")
+            return []
+        finally:
+            session.close()
\ No newline at end of file
diff --git a/src/telemetry/database/tests/__init__.py b/src/telemetry/database/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..839e45e3b646bc60de7edd81fcfb91b7b38feadf
--- /dev/null
+++ b/src/telemetry/database/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
\ No newline at end of file
diff --git a/src/telemetry/database/tests/managementDBtests.py b/src/telemetry/database/tests/managementDBtests.py
new file mode 100644
index 0000000000000000000000000000000000000000..24138abe42be742bd9b16d7840343f9d7c7fe133
--- /dev/null
+++ b/src/telemetry/database/tests/managementDBtests.py
@@ -0,0 +1,22 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from telemetry.database.managementDB import managementDB
+from telemetry.database.tests.messages import create_collector_model_object
+
+
+def test_add_row_to_db():
+    managementDBobj = managementDB()
+    managementDBobj.add_row_to_db(create_collector_model_object())
\ No newline at end of file
diff --git a/src/telemetry/database/tests/messages.py b/src/telemetry/database/tests/messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..6919eecc62da0794869f334c4de85cb129fbab14
--- /dev/null
+++ b/src/telemetry/database/tests/messages.py
@@ -0,0 +1,80 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import uuid
+import random
+from common.proto import telemetry_frontend_pb2
+from common.proto import kpi_manager_pb2
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+
+
+def create_collector_request():
+    _create_collector_request                                = telemetry_frontend_pb2.Collector()
+    _create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4())
+    _create_collector_request.kpi_id.kpi_id.uuid             = '71d58648-bf47-49ac-996f-e63a9fbfead4' # must be primary key in kpi table
+    # _create_collector_request.kpi_id.kpi_id.uuid             = str(uuid.uuid4())
+    _create_collector_request.duration_s                     = float(random.randint(8, 16))
+    _create_collector_request.interval_s                     = float(random.randint(2, 4))
+    return _create_collector_request
+
+def create_kpi_request():
+    _create_kpi_request                                     = kpi_manager_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_id.kpi_id.uuid                  = str(uuid.uuid4())
+    _create_kpi_request.kpi_description                     = 'KPI Description Test'
+    _create_kpi_request.kpi_sample_type                     = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    _create_kpi_request.service_id.service_uuid.uuid        = 'SERV' 
+    _create_kpi_request.device_id.device_uuid.uuid          = 'DEV'  
+    _create_kpi_request.slice_id.slice_uuid.uuid            = 'SLC'  
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid      = 'END'  
+    _create_kpi_request.connection_id.connection_uuid.uuid  = 'CON'  
+    # _create_kpi_request.link_id.link_id.uuid                = 'LNK'
+    return _create_kpi_request
+
+def create_kpi_id_request():
+    _create_kpi_id_request             = kpi_manager_pb2.KpiId()
+    _create_kpi_id_request.kpi_id.uuid = '71d58648-bf47-49ac-996f-e63a9fbfead4'
+    return _create_kpi_id_request
+
+def create_collector_id_request():
+    _create_collector_id_request                   = telemetry_frontend_pb2.CollectorId()
+    _create_collector_id_request.collector_id.uuid = '71d58648-bf47-49ac-996f-e63a9fbfead4'
+    return _create_collector_id_request
+
+def create_kpi_filter_request():
+    # create a dict as follows: 'Key' = 'KpiModel' column name and 'Value' = filter to apply.
+    _create_kpi_filter_request                    = dict()
+    _create_kpi_filter_request['kpi_sample_type'] = 102
+    _create_kpi_filter_request['kpi_id']          = '3a17230d-8e95-4afb-8b21-6965481aee5a'
+    return _create_kpi_filter_request
+
+def create_collector_filter_request():
+    # create a dict as follows: 'Key' = 'KpiModel' column name and 'Value' = filter to apply.
+    _create_kpi_filter_request                        = dict()
+    _create_kpi_filter_request['sampling_interval_s'] = 3.0
+    # _create_kpi_filter_request['kpi_id']              = '11e2c6c6-b507-40aa-ab3a-ffd41e7125f0'
+    return _create_kpi_filter_request
+
+def create_collector_model_object():
+    # description column is named 'collector_decription' (sic) in TelemetryModel
+    collector_to_insert                     = CollectorModel()
+    collector_to_insert.collector_id        = str(uuid.uuid4())
+    collector_to_insert.kpi_id              = '3a17230d-8e95-4afb-8b21-6965481aee5a'
+    collector_to_insert.collector_decription = "Test collector description"
+    collector_to_insert.sampling_duration_s = 15
+    collector_to_insert.sampling_interval_s = 3
+    collector_to_insert.start_timestamp     = time.time()
+    collector_to_insert.end_timestamp       = time.time()
+    return collector_to_insert
\ No newline at end of file
diff --git a/src/telemetry/database/tests/telemetryDBtests.py b/src/telemetry/database/tests/telemetryDBtests.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d221106419d6e4ee4b313adf10c90c5e6be7666
--- /dev/null
+++ b/src/telemetry/database/tests/telemetryDBtests.py
@@ -0,0 +1,86 @@
+
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Any
+from sqlalchemy.ext.declarative import declarative_base
+from telemetry.database.TelemetryDBmanager import TelemetryDBmanager
+from telemetry.database.TelemetryEngine import TelemetryEngine
+from telemetry.database.tests import temp_DB
+from .messages import create_kpi_request, create_collector_request, \
+                        create_kpi_id_request, create_kpi_filter_request, \
+                        create_collector_id_request, create_collector_filter_request
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+
+# def test_temp_DB():
+#     temp_DB.main()
+
+def test_telemetry_object_creation():
+    LOGGER.info('--- test_telemetry_object_creation: START')
+
+    LOGGER.info('>>> Creating TelemetryDBmanager Object <<< ')
+    TelemetryDBmanagerObj = TelemetryDBmanager()
+    TelemetryEngine.create_database(TelemetryDBmanagerObj.db_engine)        # creates 'frontend' db, if it doesnot exists.
+
+    LOGGER.info('>>> Creating database <<< ')
+    TelemetryDBmanagerObj.create_database()
+
+    LOGGER.info('>>> verifing database <<< ')
+    TelemetryDBmanagerObj.list_databases()
+
+    # # LOGGER.info('>>> Droping Tables: ')
+    # # TelemetryDBmanagerObj.drop_table("table_naem_here")
+
+    LOGGER.info('>>> Creating Tables <<< ')
+    TelemetryDBmanagerObj.create_tables()
+
+    LOGGER.info('>>> Verifing Table creation <<< ')
+    TelemetryDBmanagerObj.verify_tables()
+
+    # LOGGER.info('>>> TESTING: Row Insertion Operation: kpi Table <<<')
+    # kpi_obj = create_kpi_request()
+    # TelemetryDBmanagerObj.inser_kpi(kpi_obj)
+
+    # LOGGER.info('>>> TESTING: Row Insertion Operation: collector Table <<<')
+    # collector_obj = create_collector_request()
+    # TelemetryDBmanagerObj.insert_collector(collector_obj)
+
+    # LOGGER.info('>>> TESTING: Get KpiDescriptor  <<<')
+    # kpi_id_obj = create_kpi_id_request()
+    # TelemetryDBmanagerObj.get_kpi_descriptor(kpi_id_obj)
+
+    # LOGGER.info('>>> TESTING: Select Collector  <<<')
+    # collector_id_obj = create_collector_id_request()
+    # TelemetryDBmanagerObj.get_collector(collector_id_obj)
+
+    # LOGGER.info('>>> TESTING: Applying kpi filter  <<< ')
+    # kpi_filter : dict[str, Any] = create_kpi_filter_request()
+    # TelemetryDBmanagerObj.select_kpi_descriptor(**kpi_filter)
+
+    # LOGGER.info('>>> TESTING: Applying collector filter   <<<')
+    # collector_filter : dict[str, Any] = create_collector_filter_request()
+    # TelemetryDBmanagerObj.select_collector(**collector_filter)
+    
+    # LOGGER.info('>>> TESTING: Delete KpiDescriptor ')
+    # kpi_id_obj = create_kpi_id_request()
+    # TelemetryDBmanagerObj.delete_kpi_descriptor(kpi_id_obj)
+
+    # LOGGER.info('>>> TESTING: Delete Collector ')
+    # collector_id_obj = create_collector_id_request()
+    # TelemetryDBmanagerObj.delete_collector(collector_id_obj)
+    
\ No newline at end of file
diff --git a/src/telemetry/database/tests/temp_DB.py b/src/telemetry/database/tests/temp_DB.py
new file mode 100644
index 0000000000000000000000000000000000000000..089d3542492c2da87b839416f7118749bb82caad
--- /dev/null
+++ b/src/telemetry/database/tests/temp_DB.py
@@ -0,0 +1,327 @@
+import logging
+from sqlalchemy import create_engine, Column, String, Integer, Text, Float, ForeignKey
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker, relationship
+from sqlalchemy.orm.exc import NoResultFound
+LOGGER = logging.getLogger(__name__)
+Base = declarative_base()
+
+class Kpi(Base):
+    __tablename__ = 'kpi'
+
+    kpi_id          = Column(UUID(as_uuid=False), primary_key=True)
+    kpi_description = Column(Text)
+    kpi_sample_type = Column(Integer)
+    device_id       = Column(String)
+    endpoint_id     = Column(String)
+    service_id      = Column(String)
+    slice_id        = Column(String)
+    connection_id   = Column(String)
+    link_id         = Column(String)
+
+    collectors = relationship('Collector', back_populates='kpi')
+
+    def __repr__(self):
+        return (f"<Kpi(kpi_id='{self.kpi_id}', kpi_description='{self.kpi_description}', "
+                f"kpi_sample_type='{self.kpi_sample_type}', device_id='{self.device_id}', "
+                f"endpoint_id='{self.endpoint_id}', service_id='{self.service_id}', "
+                f"slice_id='{self.slice_id}', connection_id='{self.connection_id}', "
+                f"link_id='{self.link_id}')>")
+    
+class Collector(Base):
+    __tablename__ = 'collector'
+
+    collector_id        = Column(UUID(as_uuid=False), primary_key=True)
+    kpi_id              = Column(UUID(as_uuid=False), ForeignKey('kpi.kpi_id'))
+    collector           = Column(String)
+    sampling_duration_s = Column(Float)
+    sampling_interval_s = Column(Float)
+    start_timestamp     = Column(Float)
+    end_timestamp       = Column(Float)
+
+    kpi = relationship('Kpi', back_populates='collectors')
+
+    def __repr__(self):
+        return (f"<Collector(collector_id='{self.collector_id}', kpi_id='{self.kpi_id}', "
+                f"collector='{self.collector}', sampling_duration_s='{self.sampling_duration_s}', "
+                f"sampling_interval_s='{self.sampling_interval_s}', start_timestamp='{self.start_timestamp}', "
+                f"end_timestamp='{self.end_timestamp}')>")
+
+class DatabaseManager:
+    def __init__(self, db_url, db_name):
+        self.engine = create_engine(db_url)
+        self.db_name = db_name
+        self.Session = sessionmaker(bind=self.engine)
+        LOGGER.info("DatabaseManager initialized with DB URL: %s and DB Name: %s", db_url, db_name)
+
+    def create_database(self):
+        try:
+            with self.engine.connect() as connection:
+                connection.execute(f"CREATE DATABASE {self.db_name};")
+            LOGGER.info("Database '%s' created successfully.", self.db_name)
+        except Exception as e:
+            LOGGER.error("Error creating database '%s': %s", self.db_name, e)
+        finally:
+            LOGGER.info("create_database method execution finished.")
+
+    def create_tables(self):
+        try:
+            Base.metadata.create_all(self.engine)
+            LOGGER.info("Tables created successfully.")
+        except Exception as e:
+            LOGGER.error("Error creating tables: %s", e)
+        finally:
+            LOGGER.info("create_tables method execution finished.")
+
+    def verify_table_creation(self):
+        try:
+            with self.engine.connect() as connection:
+                result = connection.execute("SHOW TABLES;")
+                tables = result.fetchall()
+                LOGGER.info("Tables verified: %s", tables)
+                return tables
+        except Exception as e:
+            LOGGER.error("Error verifying table creation: %s", e)
+            return []
+        finally:
+            LOGGER.info("verify_table_creation method execution finished.")
+
+    def insert_row_kpi(self, kpi_data):
+        session = self.Session()
+        try:
+            new_kpi = Kpi(**kpi_data)
+            session.add(new_kpi)
+            session.commit()
+            LOGGER.info("Inserted row into KPI table: %s", kpi_data)
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error inserting row into KPI table: %s", e)
+        finally:
+            session.close()
+            LOGGER.info("insert_row_kpi method execution finished.")
+
+    def insert_row_collector(self, collector_data):
+        session = self.Session()
+        try:
+            new_collector = Collector(**collector_data)
+            session.add(new_collector)
+            session.commit()
+            LOGGER.info("Inserted row into Collector table: %s", collector_data)
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error inserting row into Collector table: %s", e)
+        finally:
+            session.close()
+            LOGGER.info("insert_row_collector method execution finished.")
+
+    def verify_insertion_kpi(self, kpi_id):
+        session = self.Session()
+        try:
+            kpi = session.query(Kpi).filter_by(kpi_id=kpi_id).first()
+            LOGGER.info("Verified insertion in KPI table for kpi_id: %s, Result: %s", kpi_id, kpi)
+            return kpi
+        except Exception as e:
+            LOGGER.error("Error verifying insertion in KPI table for kpi_id %s: %s", kpi_id, e)
+            return None
+        finally:
+            session.close()
+            LOGGER.info("verify_insertion_kpi method execution finished.")
+
+    def verify_insertion_collector(self, collector_id):
+        session = self.Session()
+        try:
+            collector = session.query(Collector).filter_by(collector_id=collector_id).first()
+            LOGGER.info("Verified insertion in Collector table for collector_id: %s, Result: %s", collector_id, collector)
+            return collector
+        except Exception as e:
+            LOGGER.error("Error verifying insertion in Collector table for collector_id %s: %s", collector_id, e)
+            return None
+        finally:
+            session.close()
+            LOGGER.info("verify_insertion_collector method execution finished.")
+
+    def get_all_kpi_rows(self):
+        session = self.Session()
+        try:
+            kpi_rows = session.query(Kpi).all()
+            LOGGER.info("Fetched all rows from KPI table: %s", kpi_rows)
+            return kpi_rows
+        except Exception as e:
+            LOGGER.error("Error fetching all rows from KPI table: %s", e)
+            return []
+        finally:
+            session.close()
+            LOGGER.info("get_all_kpi_rows method execution finished.")
+
+    def get_all_collector_rows(self):
+        session = self.Session()
+        try:
+            collector_rows = session.query(Collector).all()
+            LOGGER.info("Fetched all rows from Collector table: %s", collector_rows)
+            return collector_rows
+        except Exception as e:
+            LOGGER.error("Error fetching all rows from Collector table: %s", e)
+            return []
+        finally:
+            session.close()
+            LOGGER.info("get_all_collector_rows method execution finished.")
+
+    def get_filtered_kpi_rows(self, **filters):
+        session = self.Session()
+        try:
+            query = session.query(Kpi)
+            for column, value in filters.items():
+                query = query.filter(getattr(Kpi, column) == value)
+            result = query.all()
+            LOGGER.info("Fetched filtered rows from KPI table with filters ---------- : {:s}".format(str(result)))
+            return result
+        except NoResultFound:
+            LOGGER.warning("No results found in KPI table with filters %s", filters)
+            return []
+        except Exception as e:
+            LOGGER.error("Error fetching filtered rows from KPI table with filters %s: %s", filters, e)
+            return []
+        finally:
+            session.close()
+            LOGGER.info("get_filtered_kpi_rows method execution finished.")
+
+    def get_filtered_collector_rows(self, **filters):
+        session = self.Session()
+        try:
+            query = session.query(Collector)
+            for column, value in filters.items():
+                query = query.filter(getattr(Collector, column) == value)
+            result = query.all()
+            LOGGER.info("Fetched filtered rows from Collector table with filters %s: %s", filters, result)
+            return result
+        except NoResultFound:
+            LOGGER.warning("No results found in Collector table with filters %s", filters)
+            return []
+        except Exception as e:
+            LOGGER.error("Error fetching filtered rows from Collector table with filters %s: %s", filters, e)
+            return []
+        finally:
+            session.close()
+            LOGGER.info("get_filtered_collector_rows method execution finished.")
+
+    def delete_kpi_by_id(self, kpi_id):
+        session = self.Session()
+        try:
+            kpi = session.query(Kpi).filter_by(kpi_id=kpi_id).first()
+            if kpi:
+                session.delete(kpi)
+                session.commit()
+                LOGGER.info("Deleted KPI with kpi_id: %s", kpi_id)
+            else:
+                LOGGER.warning("KPI with kpi_id %s not found", kpi_id)
+        except SQLAlchemyError as e:
+            session.rollback()
+            LOGGER.error("Error deleting KPI with kpi_id %s: %s", kpi_id, e)
+        finally:
+            session.close()
+            LOGGER.info("delete_kpi_by_id method execution finished.")
+
+    def delete_collector_by_id(self, collector_id):
+        session = self.Session()
+        try:
+            collector = session.query(Collector).filter_by(collector_id=collector_id).first()
+            if collector:
+                session.delete(collector)
+                session.commit()
+                LOGGER.info("Deleted Collector with collector_id: %s", collector_id)
+            else:
+                LOGGER.warning("Collector with collector_id %s not found", collector_id)
+        except SQLAlchemyError as e:
+            session.rollback()
+            LOGGER.error("Error deleting Collector with collector_id %s: %s", collector_id, e)
+        finally:
+            session.close()
+            LOGGER.info("delete_collector_by_id method execution finished.")
+
+
+# Example Usage
+def main():
+    CRDB_SQL_PORT  = "26257"
+    CRDB_DATABASE  = "telemetryfrontend"
+    CRDB_USERNAME  = "tfs"
+    CRDB_PASSWORD  = "tfs123"
+    CRDB_SSLMODE   = "require"
+    CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}'
+    crdb_uri = CRDB_URI_TEMPLATE.format(
+            CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+    # db_url = "cockroachdb://username:password@localhost:26257/"
+    # db_name = "yourdatabase"
+    db_manager = DatabaseManager(crdb_uri, CRDB_DATABASE)
+
+    # Create database
+    db_manager.create_database()
+
+    # Update db_url to include the new database name
+    db_manager.engine = create_engine(f"{crdb_uri}")
+    db_manager.Session = sessionmaker(bind=db_manager.engine)
+
+    # Create tables
+    db_manager.create_tables()
+
+    # Verify table creation
+    tables = db_manager.verify_table_creation()
+    LOGGER.info('Tables in the database: {:s}'.format(str(tables)))    
+
+    # Insert a row into the KPI table (id must match the FK/verify/delete ids below)
+    kpi_data = {
+        'kpi_id': '123e4567-e89b-12d3-a456-426614174000',
+        'kpi_description': 'Sample KPI',
+        'kpi_sample_type': 1,
+        'device_id': 'device_1',
+        'endpoint_id': 'endpoint_1',
+        'service_id': 'service_1',
+        'slice_id': 'slice_1',
+        'connection_id': 'conn_1',
+        'link_id': 'link_1'
+    }
+    db_manager.insert_row_kpi(kpi_data)
+
+    # Insert a row into the Collector table (kpi_id references the KPI inserted above)
+    collector_data = {
+        'collector_id': '123e4567-e89b-12d3-a456-426614174001',
+        'kpi_id': '123e4567-e89b-12d3-a456-426614174000',
+        'collector': 'Collector 1',
+        'sampling_duration_s': 60.0,
+        'sampling_interval_s': 10.0,
+        'start_timestamp': 1625247600.0,
+        'end_timestamp': 1625247660.0
+    }
+    db_manager.insert_row_collector(collector_data)
+
+    # Verify insertion into KPI table
+    kpi = db_manager.verify_insertion_kpi('123e4567-e89b-12d3-a456-426614174000')
+    print("Inserted KPI:", kpi)
+
+    # Verify insertion into Collector table
+    collector = db_manager.verify_insertion_collector('123e4567-e89b-12d3-a456-426614174001')
+    print("Inserted Collector:", collector)
+
+    # Get all rows from KPI table
+    all_kpi_rows = db_manager.get_all_kpi_rows()
+    LOGGER.info("All KPI Rows: %s", all_kpi_rows)
+
+    # Get all rows from Collector table
+    all_collector_rows = db_manager.get_all_collector_rows()
+    LOGGER.info("All Collector Rows: %s", all_collector_rows)
+
+    # Get filtered rows from KPI table
+    filtered_kpi_rows = db_manager.get_filtered_kpi_rows(kpi_description='Sample KPI')
+    LOGGER.info("Filtered KPI Rows: %s", filtered_kpi_rows)
+
+    # Get filtered rows from Collector table
+    filtered_collector_rows = db_manager.get_filtered_collector_rows(collector='Collector 1')
+    LOGGER.info("Filtered Collector Rows: %s", filtered_collector_rows)
+
+    # Delete a KPI by kpi_id
+    kpi_id_to_delete = '123e4567-e89b-12d3-a456-426614174000'
+    db_manager.delete_kpi_by_id(kpi_id_to_delete)
+
+    # Delete a Collector by collector_id
+    collector_id_to_delete = '123e4567-e89b-12d3-a456-426614174001'
+    db_manager.delete_collector_by_id(collector_id_to_delete)
diff --git a/src/telemetry/frontend/__init__.py b/src/telemetry/frontend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..234a1af6588c91f6a17f3963f69120cd6e2248d9
--- /dev/null
+++ b/src/telemetry/frontend/__init__.py
@@ -0,0 +1,15 @@
+
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/frontend/client/TelemetryFrontendClient.py b/src/telemetry/frontend/client/TelemetryFrontendClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd36ecd45933ad10758e408cf03c1bf834d27ba6
--- /dev/null
+++ b/src/telemetry/frontend/client/TelemetryFrontendClient.py
@@ -0,0 +1,70 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+
+from common.proto.context_pb2 import Empty
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.proto.telemetry_frontend_pb2_grpc import TelemetryFrontendServiceStub
+from common.proto.telemetry_frontend_pb2 import Collector, CollectorId, CollectorFilter, CollectorList
+
+LOGGER = logging.getLogger(__name__)
+# Retry policy shared by all RPC wrappers below: up to 10 attempts with
+# exponential backoff (10 ms initial, x2 growth, capped at 5 s); the client's
+# 'connect' method is invoked before each retry to rebuild channel and stub.
+MAX_RETRIES = 10
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class TelemetryFrontendClient:
+    """
+    gRPC client for the TelemetryFrontend service.
+
+    Wraps TelemetryFrontendServiceStub; every RPC is retried with exponential
+    backoff via RETRY_DECORATOR, which calls 'connect' before each retry to
+    re-create the channel and stub.
+    """
+    def __init__(self, host=None, port=None):
+        # Fall back to the host/port resolved from common Settings when no
+        # explicit endpoint is given.
+        if not host: host = get_service_host(ServiceNameEnum.TELEMETRYFRONTEND)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        # (Re-)create the insecure channel and stub; also used by
+        # RETRY_DECORATOR as its prepare step before each retry.
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = TelemetryFrontendServiceStub(self.channel)
+
+    def close(self):
+        # Close the underlying channel (if any) and drop references.
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def StartCollector(self, request : Collector) -> CollectorId: # type: ignore
+        """Ask the frontend to start a collector; returns its CollectorId."""
+        LOGGER.debug('StartCollector: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.StartCollector(request)
+        LOGGER.debug('StartCollector result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def StopCollector(self, request : CollectorId) -> Empty: # type: ignore
+        """Ask the frontend to stop the identified collector."""
+        LOGGER.debug('StopCollector: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.StopCollector(request)
+        LOGGER.debug('StopCollector result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def SelectCollectors(self, request : CollectorFilter) -> CollectorList: # type: ignore
+        """Retrieve the collectors matching the given CollectorFilter."""
+        LOGGER.debug('SelectCollectors: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectCollectors(request)
+        LOGGER.debug('SelectCollectors result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
diff --git a/src/telemetry/frontend/client/__init__.py b/src/telemetry/frontend/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/telemetry/frontend/client/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/frontend/service/TelemetryFrontendService.py b/src/telemetry/frontend/service/TelemetryFrontendService.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc3f8df363a882db0f0ba3112a38f3bba3921c30
--- /dev/null
+++ b/src/telemetry/frontend/service/TelemetryFrontendService.py
@@ -0,0 +1,30 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from monitoring.service.NameMapping import NameMapping
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.proto.telemetry_frontend_pb2_grpc import add_TelemetryFrontendServiceServicer_to_server
+from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl
+
+
+class TelemetryFrontendService(GenericGrpcService):
+    def __init__(self, name_mapping : NameMapping, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND)
+        super().__init__(port, cls_name=cls_name)
+        self.telemetry_frontend_servicer = TelemetryFrontendServiceServicerImpl(name_mapping)
+
+    def install_servicers(self):
+        add_TelemetryFrontendServiceServicer_to_server(self.telemetry_frontend_servicer, self.server)
diff --git a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6830ad676d3934c88b01575ebdd1d0549fb00d1
--- /dev/null
+++ b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
@@ -0,0 +1,204 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ast
+import threading
+import time
+from typing import Tuple, Any
+import grpc
+import logging
+
+from confluent_kafka import Consumer as KafkaConsumer
+from common.proto.context_pb2 import Empty
+from monitoring.service.NameMapping import NameMapping
+from confluent_kafka import Producer as KafkaProducer
+from confluent_kafka import KafkaException
+from confluent_kafka import KafkaError
+from common.proto.telemetry_frontend_pb2 import CollectorId, Collector, CollectorFilter, CollectorList
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.proto.telemetry_frontend_pb2_grpc import TelemetryFrontendServiceServicer
+
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+from telemetry.database.managementDB import managementDB
+
+LOGGER            = logging.getLogger(__name__)
+METRICS_POOL      = MetricsPool('Monitoring', 'TelemetryFrontend')
+KAFKA_SERVER_IP   = '127.0.0.1:9092'
+ACTIVE_COLLECTORS = []
+KAFKA_TOPICS      = {'request' : 'topic_request', 
+                     'response': 'topic_response'}
+
+
+class TelemetryFrontendServiceServicerImpl(TelemetryFrontendServiceServicer):
+    def __init__(self, name_mapping : NameMapping):
+        LOGGER.info('Init TelemetryFrontendService')
+        self.managementDBobj = managementDB()
+        self.kafka_producer = KafkaProducer({'bootstrap.servers': KAFKA_SERVER_IP,})
+        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KAFKA_SERVER_IP,
+                                            'group.id'          : 'frontend',
+                                            'auto.offset.reset' : 'latest'})
+
+    def add_collector_to_db(self, request: Collector ): # type: ignore
+        try:
+            # Create a new Collector instance
+            collector_to_insert                     = CollectorModel()
+            collector_to_insert.collector_id        = request.collector_id.collector_id.uuid
+            collector_to_insert.kpi_id              = request.kpi_id.kpi_id.uuid
+            # collector_to_insert.collector_decription= request.collector
+            collector_to_insert.sampling_duration_s = request.duration_s
+            collector_to_insert.sampling_interval_s = request.interval_s
+            collector_to_insert.start_timestamp     = time.time()
+            collector_to_insert.end_timestamp       = time.time()
+            managementDB.add_row_to_db(collector_to_insert)
+        except Exception as e:
+            LOGGER.info("Unable to create collectorModel class object. {:}".format(e))
+
+    # @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StartCollector(self, 
+                       request : Collector, grpc_context: grpc.ServicerContext # type: ignore
+                      ) -> CollectorId: # type: ignore
+        # push info to frontend db
+        LOGGER.info ("gRPC message: {:}".format(request))
+        response = CollectorId()
+        _collector_id       = str(request.collector_id.collector_id.uuid)
+        _collector_kpi_id   = str(request.kpi_id.kpi_id.uuid)
+        _collector_duration = int(request.duration_s)
+        _collector_interval = int(request.interval_s)
+        # pushing Collector to DB
+        self.add_collector_to_db(request)
+        self.publish_to_kafka_request_topic(_collector_id, _collector_kpi_id, _collector_duration, _collector_interval)
+        # self.run_publish_to_kafka_request_topic(_collector_id, _collector_kpi_id, _collector_duration, _collector_interval)
+        response.collector_id.uuid = request.collector_id.collector_id.uuid # type: ignore
+        return response
+    
+    def run_publish_to_kafka_request_topic(self, msg_key: str, kpi: str, duration : int, interval: int):
+        # Add threading.Thread() response to dictonary and call start() in the next statement
+        threading.Thread(target=self.publish_to_kafka_request_topic, args=(msg_key, kpi, duration, interval)).start()
+
+    def publish_to_kafka_request_topic(self, 
+                             collector_id: str, kpi: str, duration : int, interval: int
+                             ):
+        """
+        Method to generate collector request to Kafka topic.
+        """
+        # time.sleep(5)
+        # producer_configs = {
+        #     'bootstrap.servers': KAFKA_SERVER_IP,
+        # }
+        # topic_request = "topic_request"
+        msg_value : Tuple [str, int, int] = (kpi, duration, interval)
+        # print ("Request generated: ", "Colletcor Id: ", collector_id, \
+        #         ", \nKPI: ", kpi, ", Duration: ", duration, ", Interval: ", interval)
+        # producerObj = KafkaProducer(producer_configs)
+        self.kafka_producer.produce(KAFKA_TOPICS['request'], key=collector_id, value= str(msg_value), callback=self.delivery_callback)
+        # producerObj.produce(KAFKA_TOPICS['request'], key=collector_id, value= str(msg_value), callback=self.delivery_callback)
+        LOGGER.info("Collector Request Generated: {:}, {:}, {:}, {:}".format(collector_id, kpi, duration, interval))
+        # producerObj.produce(topic_request, key=collector_id, value= str(msg_value), callback=self.delivery_callback)
+        ACTIVE_COLLECTORS.append(collector_id)
+        self.kafka_producer.flush()
+
+    def run_kafka_listener(self):
+        # print ("--- STARTED: run_kafka_listener ---")
+        threading.Thread(target=self.kafka_listener).start()
+        return True
+
+    def kafka_listener(self):
+        """
+        listener for response on Kafka topic.
+        """
+        # # print ("--- STARTED: kafka_listener ---")
+        # conusmer_configs = {
+        #     'bootstrap.servers' : KAFKA_SERVER_IP,
+        #     'group.id'          : 'frontend',
+        #     'auto.offset.reset' : 'latest'
+        # }
+        # # topic_response = "topic_response"
+
+        # consumerObj = KafkaConsumer(conusmer_configs)
+        self.kafka_consumer.subscribe([KAFKA_TOPICS['response']])
+        # print (time.time())
+        while True:
+            receive_msg = self.kafka_consumer.poll(2.0)
+            if receive_msg is None:
+                # print (" - Telemetry frontend listening on Kafka Topic: ", KAFKA_TOPICS['response'])     # added for debugging purposes
+                continue
+            elif receive_msg.error():
+                if receive_msg.error().code() == KafkaError._PARTITION_EOF:
+                    continue
+                else:
+                    print("Consumer error: {}".format(receive_msg.error()))
+                    break
+            try:
+                collector_id = receive_msg.key().decode('utf-8')
+                if collector_id in ACTIVE_COLLECTORS:
+                    (kpi_id, kpi_value) = ast.literal_eval(receive_msg.value().decode('utf-8'))
+                    self.process_response(collector_id, kpi_id, kpi_value)
+                else:
+                    print(f"collector id does not match.\nRespone ID: '{collector_id}' --- Active IDs: '{ACTIVE_COLLECTORS}' ")
+            except Exception as e:
+                print(f"No message key found: {str(e)}")
+                continue
+                # return None
+
+    def process_response(self, collector_id: str, kpi_id: str, kpi_value: Any):
+        if kpi_id == "-1" and kpi_value == -1:
+            # LOGGER.info("Sucessfully terminated Collector: {:}".format(collector_id))
+            print ("Sucessfully terminated Collector: ", collector_id)
+        else:
+            print ("Frontend-Received values Collector Id:", collector_id, "-KPI:", kpi_id, "-VALUE:", kpi_value)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def delivery_callback(self, err, msg):
+        """
+        Callback function to handle message delivery status.
+        Args:
+            err (KafkaError): Kafka error object.
+            msg (Message): Kafka message object.
+        """
+        if err:
+            print(f'Message delivery failed: {err}')
+        else:
+            print(f'Message delivered to topic {msg.topic()}')
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StopCollector(self, 
+                      request : CollectorId, grpc_context: grpc.ServicerContext # type: ignore
+                     ) -> Empty:  # type: ignore
+        LOGGER.info ("gRPC message: {:}".format(request))
+        _collector_id = request.collector_id.uuid
+        self.publish_to_kafka_request_topic(_collector_id, "", -1, -1)
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectCollectors(self, 
+                         request : CollectorFilter, contextgrpc_context: grpc.ServicerContext # type: ignore
+                        ) -> CollectorList:  # type: ignore
+        LOGGER.info("gRPC message: {:}".format(request))
+        response = CollectorList()
+        filter_to_apply = dict()
+        filter_to_apply['kpi_id']       = request.kpi_id[0].kpi_id.uuid
+        # filter_to_apply['duration_s'] = request.duration_s[0]
+        try:
+            rows = self.managementDBobj.select_with_filter(CollectorModel, **filter_to_apply)
+        except Exception as e:
+            LOGGER.info('Unable to apply filter on kpi descriptor. {:}'.format(e))
+        try:
+            if len(rows) != 0:
+                for row in rows:
+                    collector_obj = Collector()
+                    collector_obj.collector_id.collector_id.uuid = row.collector_id
+                    response.collector_list.append(collector_obj)
+            return response
+        except Exception as e:
+            LOGGER.info('Unable to process response {:}'.format(e))
\ No newline at end of file
diff --git a/src/telemetry/frontend/service/__init__.py b/src/telemetry/frontend/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/telemetry/frontend/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/frontend/service/__main__.py b/src/telemetry/frontend/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b0263706c3dad3756306d1ba8a3a104d568cd6f
--- /dev/null
+++ b/src/telemetry/frontend/service/__main__.py
@@ -0,0 +1,72 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import signal
+import sys
+import logging, threading
+from prometheus_client import start_http_server
+from monitoring.service.NameMapping import NameMapping
+from .TelemetryFrontendService import TelemetryFrontendService
+from monitoring.service.EventTools import EventsDeviceCollector
+from common.Settings import (
+    get_log_level, wait_for_environment_variables, get_env_var_name, 
+    get_metrics_port )
+
+# Event set by signal_handler to request a graceful shutdown; main() polls it.
+terminate = threading.Event()
+# Configured inside main(); None until logging is set up.
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    # Request a graceful shutdown; main() polls 'terminate' and stops the service.
+    # NOTE(review): LOGGER is None until main() configures logging — a signal
+    # delivered before that would raise AttributeError; confirm acceptable.
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+    LOGGER = logging.getLogger(__name__)
+
+# ------- will be added later --------------
+    # wait_for_environment_variables([
+    #     get_env_var_name
+
+
+    # ])
+# ------- will be added later --------------
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
+
+    name_mapping = NameMapping()
+
+    grpc_service = TelemetryFrontendService(name_mapping)
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.info('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
\ No newline at end of file
diff --git a/src/telemetry/frontend/tests/Messages.py b/src/telemetry/frontend/tests/Messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..1205898d13a610cd262979242e4f489f5e35cdb8
--- /dev/null
+++ b/src/telemetry/frontend/tests/Messages.py
@@ -0,0 +1,83 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+import random
+from common.proto import telemetry_frontend_pb2
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+
+
+# ----------------------- "2nd" Iteration --------------------------------
+def create_collector_id():
+    _collector_id                   = telemetry_frontend_pb2.CollectorId()
+    _collector_id.collector_id.uuid = uuid.uuid4()
+    return _collector_id
+
+# def create_collector_id_a(coll_id_str : str):
+#     _collector_id                   = telemetry_frontend_pb2.CollectorId()
+#     _collector_id.collector_id.uuid = str(coll_id_str)
+#     return _collector_id
+
+def create_collector_request():
+    _create_collector_request                                = telemetry_frontend_pb2.Collector()
+    _create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4())
+    _create_collector_request.kpi_id.kpi_id.uuid             = "165d20c5-a446-42fa-812f-e2b7ed283c6f"
+    # _create_collector_request.collector                      = "collector description"
+    _create_collector_request.duration_s                     = float(random.randint(8, 16))
+    _create_collector_request.interval_s                     = float(random.randint(2, 4))
+    return _create_collector_request
+
+def create_collector_filter():
+    _create_collector_filter = telemetry_frontend_pb2.CollectorFilter()
+    new_kpi_id               = _create_collector_filter.kpi_id.add()
+    new_kpi_id.kpi_id.uuid   = "165d20c5-a446-42fa-812f-e2b7ed283c6f"
+    return _create_collector_filter
+
+# ----------------------- "First" Iteration --------------------------------
+# def create_collector_request_a():
+#     _create_collector_request_a                                = telemetry_frontend_pb2.Collector()
+#     _create_collector_request_a.collector_id.collector_id.uuid = "-1"
+#     return _create_collector_request_a
+
+# def create_collector_request_b(str_kpi_id, coll_duration_s, coll_interval_s
+#                                ) -> telemetry_frontend_pb2.Collector:
+#     _create_collector_request_b                                = telemetry_frontend_pb2.Collector()
+#     _create_collector_request_b.collector_id.collector_id.uuid = '1'
+#     _create_collector_request_b.kpi_id.kpi_id.uuid             = str_kpi_id
+#     _create_collector_request_b.duration_s                     = coll_duration_s
+#     _create_collector_request_b.interval_s                     = coll_interval_s
+#     return _create_collector_request_b
+
+# def create_collector_filter():
+#     _create_collector_filter = telemetry_frontend_pb2.CollectorFilter()
+#     new_collector_id                       = _create_collector_filter.collector_id.add()
+#     new_collector_id.collector_id.uuid     = "COLL1"
+#     new_kpi_id                             = _create_collector_filter.kpi_id.add()
+#     new_kpi_id.kpi_id.uuid                 = "KPI1"
+#     new_device_id                          = _create_collector_filter.device_id.add()
+#     new_device_id.device_uuid.uuid         = 'DEV1'
+#     new_service_id                         = _create_collector_filter.service_id.add()
+#     new_service_id.service_uuid.uuid       = 'SERV1'
+#     new_slice_id                           = _create_collector_filter.slice_id.add()
+#     new_slice_id.slice_uuid.uuid           = 'SLC1'
+#     new_endpoint_id                        = _create_collector_filter.endpoint_id.add()
+#     new_endpoint_id.endpoint_uuid.uuid     = 'END1'
+#     new_connection_id                      = _create_collector_filter.connection_id.add()
+#     new_connection_id.connection_uuid.uuid = 'CON1'
+#     _create_collector_filter.kpi_sample_type.append(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED)
+#     return _create_collector_filter
+
+# def create_collector_list():
+#     _create_collector_list = telemetry_frontend_pb2.CollectorList()
+#     return _create_collector_list
\ No newline at end of file
diff --git a/src/telemetry/frontend/tests/__init__.py b/src/telemetry/frontend/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/telemetry/frontend/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/frontend/tests/test_frontend.py b/src/telemetry/frontend/tests/test_frontend.py
new file mode 100644
index 0000000000000000000000000000000000000000..002cc430721845aa5aa18274375e2c22b5d77ff7
--- /dev/null
+++ b/src/telemetry/frontend/tests/test_frontend.py
@@ -0,0 +1,204 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+import pytest
+import logging
+from typing import Union
+
+from common.proto.context_pb2 import Empty
+from common.Constants import ServiceNameEnum
+from common.proto.telemetry_frontend_pb2 import CollectorId, CollectorList
+from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+from context.client.ContextClient import ContextClient
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.Settings import ( 
+    get_service_port_grpc, get_env_var_name, ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC)
+
+from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient
+from telemetry.frontend.service.TelemetryFrontendService import TelemetryFrontendService
+from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl
+from telemetry.frontend.tests.Messages import ( create_collector_request, create_collector_filter)
+from telemetry.database.managementDB import managementDB
+from telemetry.database.TelemetryEngine import TelemetryEngine
+
+from device.client.DeviceClient import DeviceClient
+from device.service.DeviceService import DeviceService
+from device.service.driver_api.DriverFactory import DriverFactory
+from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
+
+from monitoring.service.NameMapping import NameMapping
+
+os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
+from device.service.drivers import DRIVERS
+
+###########################
+# Tests Setup
+###########################
+
+LOCAL_HOST = '127.0.0.1'
+MOCKSERVICE_PORT = 10000
+
+TELEMETRY_FRONTEND_PORT = str(MOCKSERVICE_PORT) + str(get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND))
+os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(TELEMETRY_FRONTEND_PORT)
+
+LOGGER = logging.getLogger(__name__)
+
+class MockContextService(GenericGrpcService):
+    # Mock Service implementing Context to simplify unitary tests of Monitoring
+
+    def __init__(self, bind_port: Union[str, int]) -> None:
+        # Bind on localhost only; health servicer disabled to keep the mock minimal.
+        super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')
+
+    # pylint: disable=attribute-defined-outside-init
+    def install_servicers(self):
+        # Register the mock Context servicer on the underlying gRPC server.
+        self.context_servicer = MockServicerImpl_Context()
+        add_ContextServiceServicer_to_server(self.context_servicer, self.server)
+
+@pytest.fixture(scope='session')
+def context_service():
+    """Session-scoped mock Context service; started once, torn down after the session."""
+    LOGGER.info('Initializing MockContextService...')
+    _service = MockContextService(MOCKSERVICE_PORT)
+    _service.start()
+
+    LOGGER.info('Yielding MockContextService...')
+    yield _service
+
+    LOGGER.info('Terminating MockContextService...')
+    _service.context_servicer.msg_broker.terminate()
+    _service.stop()
+
+    LOGGER.info('Terminated MockContextService...')
+
+@pytest.fixture(scope='session')
+def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
+    """Session-scoped ContextClient bound to the mock Context service; closed on teardown."""
+    LOGGER.info('Initializing ContextClient...')
+    _client = ContextClient()
+
+    LOGGER.info('Yielding ContextClient...')
+    yield _client
+
+    LOGGER.info('Closing ContextClient...')
+    _client.close()
+
+    LOGGER.info('Closed ContextClient...')
+
+@pytest.fixture(scope='session')
+def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
+    """Session-scoped DeviceService built over the emulated drivers; stopped on teardown."""
+    LOGGER.info('Initializing DeviceService...')
+    driver_factory = DriverFactory(DRIVERS)
+    driver_instance_cache = DriverInstanceCache(driver_factory)
+    _service = DeviceService(driver_instance_cache)
+    _service.start()
+
+    # yield the server, when test finishes, execution will resume to stop it
+    LOGGER.info('Yielding DeviceService...')
+    yield _service
+
+    LOGGER.info('Terminating DeviceService...')
+    _service.stop()
+
+    LOGGER.info('Terminated DeviceService...')
+
+@pytest.fixture(scope='session')
+def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument
+    """Session-scoped DeviceClient bound to the test DeviceService; closed on teardown."""
+    LOGGER.info('Initializing DeviceClient...')
+    _client = DeviceClient()
+
+    LOGGER.info('Yielding DeviceClient...')
+    yield _client
+
+    LOGGER.info('Closing DeviceClient...')
+    _client.close()
+
+    LOGGER.info('Closed DeviceClient...')
+
+@pytest.fixture(scope='session')
+def telemetryFrontend_service(
+        context_service : MockContextService,
+        device_service  : DeviceService
+    ):
+    """Session-scoped TelemetryFrontendService under test; started once, stopped on teardown."""
+    LOGGER.info('Initializing TelemetryFrontendService...')
+    name_mapping = NameMapping()
+
+    _service = TelemetryFrontendService(name_mapping)
+    _service.start()
+
+    # yield the server, when test finishes, execution will resume to stop it
+    LOGGER.info('Yielding TelemetryFrontendService...')
+    yield _service
+
+    LOGGER.info('Terminating TelemetryFrontendService...')
+    _service.stop()
+
+    LOGGER.info('Terminated TelemetryFrontendService...')
+
+@pytest.fixture(scope='session')
+def telemetryFrontend_client(
+        telemetryFrontend_service : TelemetryFrontendService
+    ):
+    """Session-scoped TelemetryFrontendClient pointed at the service under test; closed on teardown."""
+    LOGGER.info('Initializing TelemetryFrontendClient...')
+    _client = TelemetryFrontendClient()
+
+    # yield the server, when test finishes, execution will resume to stop it
+    LOGGER.info('Yielding TelemetryFrontendClient...')
+    yield _client
+
+    LOGGER.info('Closing TelemetryFrontendClient...')
+    _client.close()
+
+    LOGGER.info('Closed TelemetryFrontendClient...')
+
+
+###########################
+# Tests Implementation of Telemetry Frontend
+###########################
+
+def test_verify_db_and_table():
+    """Ensure the telemetry database and its tables exist before the RPC tests run."""
+    LOGGER.info(' >>> test_verify_database_and_tables START: <<< ')
+    _engine = TelemetryEngine.get_engine()
+    managementDB.create_database(_engine)
+    managementDB.create_tables(_engine)
+
+def test_StartCollector(telemetryFrontend_client):
+    """StartCollector must return a CollectorId for a freshly generated request."""
+    LOGGER.info(' >>> test_StartCollector START: <<< ')
+    response = telemetryFrontend_client.StartCollector(create_collector_request())
+    LOGGER.debug(str(response))
+    assert isinstance(response, CollectorId)
+
+def test_run_kafka_listener():
+    """Exercise the servicer helper that spawns the Kafka response listener thread."""
+    LOGGER.info(' >>> test_run_kafka_listener START: <<< ')
+    name_mapping = NameMapping()
+    TelemetryFrontendServiceObj = TelemetryFrontendServiceServicerImpl(name_mapping)
+    # NOTE(review): the spawned listener thread is non-daemon and never joined,
+    # so it may keep the test process alive after the suite ends — confirm intended.
+    response = TelemetryFrontendServiceObj.run_kafka_listener()     # Method "run_kafka_listener" is not define in frontend.proto
+    LOGGER.debug(str(response))
+    assert isinstance(response, bool)
+
+def test_StopCollector(telemetryFrontend_client):
+    """Start a collector, wait briefly, then StopCollector must return Empty."""
+    LOGGER.info(' >>> test_StopCollector START: <<< ')
+    _collector_id = telemetryFrontend_client.StartCollector(create_collector_request())
+    time.sleep(3)   # wait for small amount before call the stopCollecter()
+    response = telemetryFrontend_client.StopCollector(_collector_id)
+    LOGGER.debug(str(response))
+    assert isinstance(response, Empty)
+
+def test_select_collectors(telemetryFrontend_client):
+    """SelectCollectors with the test filter must return a CollectorList."""
+    LOGGER.info(' >>> test_select_collector requesting <<< ')
+    response = telemetryFrontend_client.SelectCollectors(create_collector_filter())
+    LOGGER.info('Received Rows after applying Filter: {:} '.format(response))
+    LOGGER.debug(str(response))
+    assert isinstance(response, CollectorList)
\ No newline at end of file
diff --git a/src/telemetry/requirements.in b/src/telemetry/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..a0e78d2bfb7270b9664ad5ba810e2f213d887bf7
--- /dev/null
+++ b/src/telemetry/requirements.in
@@ -0,0 +1,24 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+anytree==2.8.0
+APScheduler==3.10.1
+influx-line-protocol==0.1.4
+psycopg2-binary==2.9.3
+python-dateutil==2.8.2
+python-json-logger==2.0.2
+pytz==2024.1
+questdb==1.0.1
+requests==2.27.1
+xmltodict==0.12.0
\ No newline at end of file
diff --git a/src/telemetry/telemetry_virenv.txt b/src/telemetry/telemetry_virenv.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e39f80b6593d6c41411751cdd0ea59ee05344570
--- /dev/null
+++ b/src/telemetry/telemetry_virenv.txt
@@ -0,0 +1,49 @@
+anytree==2.8.0
+APScheduler==3.10.1
+attrs==23.2.0
+certifi==2024.2.2
+charset-normalizer==2.0.12
+colorama==0.4.6
+confluent-kafka==2.3.0
+coverage==6.3
+future-fstrings==1.2.0
+greenlet==3.0.3
+grpcio==1.47.5
+grpcio-health-checking==1.47.5
+grpcio-tools==1.47.5
+grpclib==0.4.4
+h2==4.1.0
+hpack==4.0.0
+hyperframe==6.0.1
+idna==3.7
+influx-line-protocol==0.1.4
+iniconfig==2.0.0
+kafka-python==2.0.2
+multidict==6.0.5
+networkx==3.3
+packaging==24.0
+pluggy==1.5.0
+prettytable==3.5.0
+prometheus-client==0.13.0
+protobuf==3.20.3
+psycopg2-binary==2.9.3
+py==1.11.0
+py-cpuinfo==9.0.0
+pytest==6.2.5
+pytest-benchmark==3.4.1
+pytest-depends==1.0.1
+python-dateutil==2.8.2
+python-json-logger==2.0.2
+pytz==2024.1
+questdb==1.0.1
+requests==2.27.1
+six==1.16.0
+SQLAlchemy==1.4.52
+sqlalchemy-cockroachdb==1.4.4
+SQLAlchemy-Utils==0.38.3
+toml==0.10.2
+typing_extensions==4.12.0
+tzlocal==5.2
+urllib3==1.26.18
+wcwidth==0.2.13
+xmltodict==0.12.0