diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 5a4e5b601cfd2d785fd847bda9b2bdd794d6ce37..edb7d9799a2aa2050636dc61f470bfb599442b7a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -50,5 +50,7 @@ include:
   - local: '/src/kpi_value_api/.gitlab-ci.yml'
   - local: '/src/kpi_value_writer/.gitlab-ci.yml'
   - local: '/src/telemetry/.gitlab-ci.yml'
+  - local: '/src/analytics/.gitlab-ci.yml'
+
   # This should be last one: end-to-end integration tests
   - local: '/src/tests/.gitlab-ci.yml'
diff --git a/deploy/kafka.sh b/deploy/kafka.sh
index 0483bce153b457800c6f7db2ef66685e90118111..4cbcdb7014c983eeda9bab1d6655fa042751b931 100755
--- a/deploy/kafka.sh
+++ b/deploy/kafka.sh
@@ -47,10 +47,10 @@ function kafka_deploy() {
     cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
 
     # echo "Apache Kafka Namespace"
-    echo ">>> Delete Apache Kafka Namespace"
+    echo "Delete Apache Kafka Namespace"
     kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
 
-    echo ">>> Create Apache Kafka Namespace"
+    echo "Create Apache Kafka Namespace"
     kubectl create namespace ${KFK_NAMESPACE}
 
     # echo ">>> Deplying Apache Kafka Zookeeper"
@@ -76,15 +76,15 @@ function kafka_deploy() {
     # fi
 }
 
-echo "Apache Kafka"
-echo ">>> Checking if Apache Kafka is deployed ... "
+echo ">>> Apache Kafka"
+echo "Checking if Apache Kafka is deployed ... "
 if [ "$KFK_REDEPLOY" == "YES" ]; then
-    echo ">>> Redeploying kafka namespace"
+    echo "Redeploying kafka namespace"
     kafka_deploy
 elif kubectl get namespace "${KFK_NAMESPACE}" &> /dev/null; then
-    echo ">>> Apache Kafka already present; skipping step." 
+    echo "Apache Kafka already present; skipping step." 
 else
-    echo ">>> Kafka namespace doesn't exists. Deploying kafka namespace"
+    echo "Kafka namespace doesn't exists. Deploying kafka namespace"
     kafka_deploy
 fi
 echo
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 189ae11e16e77196d6728482b7f16443149b60a9..65c1e8de28f2045b2ac78938b84d3c33e282025e 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -146,55 +146,17 @@ kubectl create namespace $TFS_K8S_NAMESPACE
 sleep 2
 printf "\n"
 
-echo "Create secret with CockroachDB data"
+echo ">>> Create Secret with CockroachDB data..."
 CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
-CRDB_DATABASE_CONTEXT=${CRDB_DATABASE}  # TODO: change by specific configurable environment variable
 kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
     --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
     --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
-    --from-literal=CRDB_DATABASE=${CRDB_DATABASE_CONTEXT} \
     --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
     --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
     --from-literal=CRDB_SSLMODE=require
 printf "\n"
 
-echo "Create secret with CockroachDB data for KPI Management microservices"
-CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
-CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt"  # TODO: change by specific configurable environment variable
-kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
-    --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
-    --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
-    --from-literal=CRDB_DATABASE=${CRDB_DATABASE_KPI_MGMT} \
-    --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
-    --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
-    --from-literal=CRDB_SSLMODE=require
-printf "\n"
-
-echo "Create secret with CockroachDB data for Telemetry microservices"
-CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
-CRDB_DATABASE_TELEMETRY="tfs_telemetry"  # TODO: change by specific configurable environment variable
-kubectl create secret generic crdb-telemetry --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
-    --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
-    --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
-    --from-literal=CRDB_DATABASE=${CRDB_DATABASE_TELEMETRY} \
-    --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
-    --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
-    --from-literal=CRDB_SSLMODE=require
-printf "\n"
-
-echo "Create secret with CockroachDB data for Analytics microservices"
-CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
-CRDB_DATABASE_ANALYTICS="tfs_analytics"  # TODO: change by specific configurable environment variable
-kubectl create secret generic crdb-analytics --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
-    --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
-    --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
-    --from-literal=CRDB_DATABASE=${CRDB_DATABASE_ANALYTICS} \
-    --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
-    --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
-    --from-literal=CRDB_SSLMODE=require
-printf "\n"
-
-echo "Create secret with Apache Kafka data for KPI, Telemetry and Analytics microservices"
+echo ">>> Create Secret with Apache Kakfa..."
 KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-service -o 'jsonpath={.spec.ports[0].port}')
 kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
     --from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \
@@ -669,6 +631,10 @@ if [[ "$TFS_COMPONENTS" == *"monitoring"* ]] && [[ "$TFS_COMPONENTS" == *"webui"
     printf "\n\n"
 fi
 
+echo "Pruning Docker Images..."
+docker image prune --force
+printf "\n\n"
+
 if [ "$DOCKER_BUILD" == "docker buildx build" ]; then
     echo "Pruning Docker Buildx Cache..."
     docker buildx prune --force
diff --git a/manifests/analyticsservice.yaml b/manifests/analyticsservice.yaml
index 6284c4e79c2d704dff93e9de1811a331a12cb8d8..f014fe2dd29eedd4cb8758c253319b93df81ae36 100644
--- a/manifests/analyticsservice.yaml
+++ b/manifests/analyticsservice.yaml
@@ -37,9 +37,13 @@ spec:
           env:
             - name: LOG_LEVEL
               value: "INFO"
+            - name: CRDB_DATABASE
+              value: "tfs_analytics"
+            - name: METRICS_PORT
+              value: "9192"
           envFrom:
             - secretRef:
-                name: crdb-analytics
+                name: crdb-data
             - secretRef:
                 name: kfk-kpi-data
           readinessProbe:
@@ -60,10 +64,12 @@ spec:
           imagePullPolicy: Always
           ports:
             - containerPort: 30090
-            - containerPort: 9192
+            - containerPort: 9193
           env:
             - name: LOG_LEVEL
               value: "INFO"
+            - name: METRICS_PORT
+              value: "9193"
           envFrom:
             - secretRef:
                 name: kfk-kpi-data
@@ -100,10 +106,14 @@ spec:
       protocol: TCP
       port: 30090
       targetPort: 30090
-    - name: metrics
+    - name: metrics-frontend
       protocol: TCP
       port: 9192
       targetPort: 9192
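+    # The backend container publishes its metrics on a separate port (9193) so it does
+    # not collide with the frontend (9192); the ServiceMonitor scrapes both named ports.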
+    - name: metrics-backend
+      protocol: TCP
+      port: 9193
+      targetPort: 9193
 ---
 apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml
index 3abc4f208da8b4820b589b798a328c4a971f55f0..0fc8a1c44f7358a962276ebcf38a165d2db986cd 100644
--- a/manifests/contextservice.yaml
+++ b/manifests/contextservice.yaml
@@ -45,6 +45,8 @@ spec:
               value: "FALSE"
             - name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY
               value: "FALSE"
+            - name: CRDB_DATABASE
+              value: "tfs_context"
           envFrom:
             - secretRef:
                 name: crdb-data
diff --git a/manifests/kpi_managerservice.yaml b/manifests/kpi_managerservice.yaml
index 984d783a9de7ed3c0c02e87d82ec673dc19c9508..f8ee8ff821ec1acb689cbe8d0bb8f8f407c971e3 100644
--- a/manifests/kpi_managerservice.yaml
+++ b/manifests/kpi_managerservice.yaml
@@ -39,9 +39,11 @@ spec:
           env:
             - name: LOG_LEVEL
               value: "INFO"
+            - name: CRDB_DATABASE
+              value: "tfs_kpi_mgmt"
           envFrom:
             - secretRef:
-                name: crdb-kpi-data
+                name: crdb-data
           readinessProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:30010"]
diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml
index d3892118a3d8330335b58459a0953bb45e4854ea..70f553e6425ca7972b8af185f432842b4e184790 100644
--- a/manifests/nbiservice.yaml
+++ b/manifests/nbiservice.yaml
@@ -38,6 +38,8 @@ spec:
           env:
             - name: LOG_LEVEL
               value: "INFO"
+            - name: IETF_NETWORK_RENDERER
+              value: "LIBYANG"
           readinessProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:9090"]
diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml
index 955d5726a9f8f79560327a8f595c1865f6d37d22..ed713bf29ad8228ab3f5b051af24519c2fb9ef09 100644
--- a/manifests/nginx_ingress_http.yaml
+++ b/manifests/nginx_ingress_http.yaml
@@ -62,3 +62,10 @@ spec:
                 name: nbiservice
                 port:
                   number: 8080
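+          # Route requests under /qkd_app/ to the QKD App service on its HTTP port (8005),
+          # reusing the capture-group path convention of the entries above.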
+          - path: /()(qkd_app/.*)
+            pathType: Prefix
+            backend:
+              service:
+                name: qkd-appservice
+                port:
+                  number: 8005
diff --git a/manifests/qkd_appservice.yaml b/manifests/qkd_appservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4f89d6c6f8400b509dc595f551e8f181e70b2f51
--- /dev/null
+++ b/manifests/qkd_appservice.yaml
@@ -0,0 +1,83 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: qkd-appservice
+spec:
+  selector:
+    matchLabels:
+      app: qkd-appservice
+  #replicas: 1
+  template:
+    metadata:
+      labels:
+        app: qkd-appservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+      - name: server
+        image: labs.etsi.org:5050/tfs/controller/qkd_app:latest
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 10060
+        - containerPort: 9192
+        - containerPort: 8005
+        env:
+        - name: LOG_LEVEL
+          value: "DEBUG"
+        - name: CRDB_DATABASE_APP
+          value: "qkd_app"
+        envFrom:
+        - secretRef:
+            name: crdb-data
+        - secretRef:
+            name: nats-data
+        readinessProbe:
+          exec:
+            command: ["/bin/grpc_health_probe", "-addr=:10060"]
+        livenessProbe:
+          exec:
+            command: ["/bin/grpc_health_probe", "-addr=:10060"]
+        resources:
+          requests:
+            cpu: 150m
+            memory: 128Mi
+          limits:
+            cpu: 500m
+            memory: 512Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: qkd-appservice
+  labels:
+    app: qkd-appservice
+spec:
+  type: ClusterIP
+  selector:
+    app: qkd-appservice
+  ports:
+  - name: grpc
+    protocol: TCP
+    port: 10060
+    targetPort: 10060
+  - name: metrics
+    protocol: TCP
+    port: 9192
+    targetPort: 9192
+  - name: http
+    port: 8005
+    targetPort: 8005
diff --git a/manifests/servicemonitors.yaml b/manifests/servicemonitors.yaml
index 716c1c6891802d7fcc55da798d06c650373fb1b5..8a8fe6f39eff87d12582f2f83734c07dc695cea3 100644
--- a/manifests/servicemonitors.yaml
+++ b/manifests/servicemonitors.yaml
@@ -475,3 +475,156 @@ spec:
     any: false
     matchNames:
       - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-analyticsservice-metric
+  labels:
+    app: analyticsservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # (VERY IMPORTANT: you need to know the correct release name; check the
+    #  ServiceMonitor of Prometheus itself. Without the correct name, Prometheus
+    #  cannot identify the app's metrics endpoint as a scrape target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: analyticsservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+    - port: metrics-frontend # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
+    - port: metrics-backend # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+      - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-telemetryservice-metric
+  labels:
+    app: telemetryservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # (VERY IMPORTANT: you need to know the correct release name; check the
+    #  ServiceMonitor of Prometheus itself. Without the correct name, Prometheus
+    #  cannot identify the app's metrics endpoint as a scrape target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: telemetryservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+    - port: metrics-frontend # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
+    - port: metrics-backend # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+      - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-kpi-managerservice-metric
+  labels:
+    app: kpi-managerservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # (VERY IMPORTANT: you need to know the correct release name; check the
+    #  ServiceMonitor of Prometheus itself. Without the correct name, Prometheus
+    #  cannot identify the app's metrics endpoint as a scrape target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: kpi-managerservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+    - port: metrics # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+      - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-kpi-value-apiservice-metric
+  labels:
+    app: kpi-value-apiservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # (VERY IMPORTANT: you need to know the correct release name; check the
+    #  ServiceMonitor of Prometheus itself. Without the correct name, Prometheus
+    #  cannot identify the app's metrics endpoint as a scrape target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: kpi-value-apiservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+    - port: metrics # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+      - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-kpi-value-writerservice-metric
+  labels:
+    app: kpi-value-writerservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # (VERY IMPORTANT: you need to know the correct release name; check the
+    #  ServiceMonitor of Prometheus itself. Without the correct name, Prometheus
+    #  cannot identify the app's metrics endpoint as a scrape target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: kpi-value-writerservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+    - port: metrics # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+      - tfs # namespace where the app is running
diff --git a/manifests/telemetryservice.yaml b/manifests/telemetryservice.yaml
index a88367e45c763e71c2292899f86b22e910053ab9..04141b8ddb2963dd7455599386733b9cea4581be 100644
--- a/manifests/telemetryservice.yaml
+++ b/manifests/telemetryservice.yaml
@@ -37,9 +37,13 @@ spec:
           env:
             - name: LOG_LEVEL
               value: "INFO"
+            - name: CRDB_DATABASE
+              value: "tfs_telemetry"
+            - name: METRICS_PORT
+              value: "9192"
           envFrom:
             - secretRef:
-                name: crdb-telemetry
+                name: crdb-data
             - secretRef:
                 name: kfk-kpi-data
           readinessProbe:
@@ -60,10 +64,12 @@ spec:
           imagePullPolicy: Always
           ports:
             - containerPort: 30060
-            - containerPort: 9192
+            - containerPort: 9193
           env:
             - name: LOG_LEVEL
               value: "INFO"
+            - name: METRICS_PORT
+              value: "9193"
           envFrom:
             - secretRef:
                 name: kfk-kpi-data
@@ -100,10 +106,14 @@ spec:
       protocol: TCP
       port: 30060
       targetPort: 30060
-    - name: metrics
+    - name: metrics-frontend
       protocol: TCP
       port: 9192
       targetPort: 9192
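+    # As in analyticsservice.yaml, frontend (9192) and backend (9193) expose separate
+    # metrics ports, and the ServiceMonitor scrapes both.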
+    - name: metrics-backend
+      protocol: TCP
+      port: 9193
+      targetPort: 9193
 ---
 apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
diff --git a/my_deploy.sh b/my_deploy.sh
index 456291b92cd38f535bec7448bcd989291c4e6181..10a262c6e942de48c1265f7b6c8e72fe127a9de4 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -71,7 +71,14 @@ export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
 #fi
 
 # Uncomment to activate QKD App
-#export TFS_COMPONENTS="${TFS_COMPONENTS} app"
+#   To manage QKD Apps, "qkd_app" must be deployed before "service", so we "hack" the
+#   TFS_COMPONENTS environment variable: if "service" is already in TFS_COMPONENTS, we
+#   insert "qkd_app" right before it and re-export the variable (see the example below).
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+#    BEFORE="${TFS_COMPONENTS% service*}"
+#    AFTER="${TFS_COMPONENTS#* service}"
+#    export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
+#fi
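+#   Example: with TFS_COMPONENTS="context device service pathcomp webui", the rewrite
+#   yields "context device qkd_app service pathcomp webui" (modulo extra whitespace).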
 
 
 # Set the tag you want to use for your images.
diff --git a/proto/qkd_app.proto b/proto/qkd_app.proto
new file mode 100644
index 0000000000000000000000000000000000000000..7b6c47330833849b889e770aac43844ec6e6072c
--- /dev/null
+++ b/proto/qkd_app.proto
@@ -0,0 +1,53 @@
+syntax = "proto3";
+package qkd_app;
+
+import "context.proto";
+
+// Optare: Change this if you want to change the App's structure or enums.
+// Optare: If a message (structure) changes, it must also be changed in src/app/service/database.
+
+enum QKDAppStatusEnum {
+  QKDAPPSTATUS_ON = 0;
+  QKDAPPSTATUS_DISCONNECTED = 1;
+  QKDAPPSTATUS_OUT_OF_TIME = 2;
+  QKDAPPSTATUS_ZOMBIE = 3;
+}
+
+enum QKDAppTypesEnum {
+  QKDAPPTYPES_INTERNAL = 0;
+  QKDAPPTYPES_CLIENT = 1;
+}
+
+message QKDLId {
+  context.Uuid qkdl_uuid = 1;
+}
+
+
+message App {
+  AppId app_id = 1;
+  QKDAppStatusEnum app_status = 2;
+  QKDAppTypesEnum app_type = 3;
+  string server_app_id = 4;
+  repeated string client_app_id = 5;
+  repeated QKDLId backing_qkdl_id = 6;
+  context.DeviceId local_device_id = 7;
+  context.DeviceId remote_device_id = 8;
+}
+
+
+message AppId {
+  context.ContextId context_id = 1;
+  context.Uuid app_uuid = 2;
+}
+
+
+service AppService {
+  rpc RegisterApp (App              ) returns (context.Empty) {}
+  rpc ListApps    (context.ContextId) returns (AppList      ) {}
+}
+
+
+message AppList {
+  repeated App apps = 1;
+}
diff --git a/scripts/run_tests_locally-kpi-DB.sh b/scripts/run_tests_locally-kpi-DB.sh
index ad1b4c57b6632266d539db07637fb1c0b024cf36..29c6595102c22bc47fa221eb80459aea934cbcd9 100755
--- a/scripts/run_tests_locally-kpi-DB.sh
+++ b/scripts/run_tests_locally-kpi-DB.sh
@@ -24,7 +24,7 @@ cd $PROJECTDIR/src
 # python3 kpi_manager/tests/test_unitary.py
 
 RCFILE=$PROJECTDIR/coverage/.coveragerc
-CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace ${CRDB_NAMESPACE} -o 'jsonpath={.spec.clusterIP}')
+CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
 export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
     kpi_manager/tests/test_kpi_db.py
diff --git a/scripts/run_tests_locally-telemetry-DB.sh b/scripts/run_tests_locally-telemetry-DB.sh
index 363b6c645d541c653f36c817a326015f5481d88b..f7c80dd86aa7503a7f42fe52db34167377ba0a37 100755
--- a/scripts/run_tests_locally-telemetry-DB.sh
+++ b/scripts/run_tests_locally-telemetry-DB.sh
@@ -20,9 +20,8 @@ cd $PROJECTDIR/src
 # RCFILE=$PROJECTDIR/coverage/.coveragerc
 # coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
 #     kpi_manager/tests/test_unitary.py
-CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
+CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
 export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs-telemetry?sslmode=require"
-
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 python3 -m pytest --log-level=DEBUG --log-cli-level=debug --verbose \
     telemetry/tests/test_telemetryDB.py
diff --git a/scripts/run_tests_locally-telemetry-backend.sh b/scripts/run_tests_locally-telemetry-backend.sh
index 4867335a53ee17c1fa279b2ee6bcf2bbac6bd1ba..97a06a0d6c16daf94e3e6b30bfc70eca3e7ce3a3 100755
--- a/scripts/run_tests_locally-telemetry-backend.sh
+++ b/scripts/run_tests_locally-telemetry-backend.sh
@@ -19,11 +19,9 @@ PROJECTDIR=`pwd`
 cd $PROJECTDIR/src
 # RCFILE=$PROJECTDIR/coverage/.coveragerc
 # coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
-#     kpi_manager/tests/test_unitary.py
 
-# python3 kpi_manager/tests/test_unitary.py
-CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
-export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
 RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+
 python3 -m pytest --log-level=INFO --log-cli-level=debug --verbose \
-    telemetry/backend/tests/test_backend.py
+    telemetry/backend/tests/test_TelemetryBackend.py
diff --git a/scripts/run_tests_locally-telemetry-frontend.sh b/scripts/run_tests_locally-telemetry-frontend.sh
index 0ed828310e2adbaf3a61e5d0e8a0a8e2283452d4..7506be5e0750b44e37368e86dbbfd00131c0d270 100755
--- a/scripts/run_tests_locally-telemetry-frontend.sh
+++ b/scripts/run_tests_locally-telemetry-frontend.sh
@@ -17,13 +17,10 @@
 PROJECTDIR=`pwd`
 
 cd $PROJECTDIR/src
-# RCFILE=$PROJECTDIR/coverage/.coveragerc
-# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
-#     kpi_manager/tests/test_unitary.py
 
-# python3 kpi_manager/tests/test_unitary.py
-CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
+CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
 export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
+
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
     telemetry/frontend/tests/test_frontend.py
diff --git a/scripts/show_logs_qkd_app.sh b/scripts/show_logs_qkd_app.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f4f9ee6a13367c8d50eb4401a5d5f357726e5966
--- /dev/null
+++ b/scripts/show_logs_qkd_app.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/qkd-appservice -c server
diff --git a/src/analytics/.gitlab-ci.yml b/src/analytics/.gitlab-ci.yml
index 33ea9f3cf4534e02f40aec13420a4839c86a0d10..dfc5f2195eeee621e95465bbc996d5a9af3e6c9c 100644
--- a/src/analytics/.gitlab-ci.yml
+++ b/src/analytics/.gitlab-ci.yml
@@ -60,6 +60,7 @@ unit_test analytics-backend:
     - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
     - if docker container ls | grep kafka; then docker rm -f kafka; else echo "Kafka container is not in the system"; fi
     - if docker container ls | grep zookeeper; then docker rm -f zookeeper; else echo "Zookeeper container is not in the system"; fi
+    # - if docker container ls | grep ${IMAGE_NAME}-frontend; then docker rm -f ${IMAGE_NAME}-frontend; else echo "${IMAGE_NAME}-frontend container is not in the system"; fi
     - if docker container ls | grep ${IMAGE_NAME}-backend; then docker rm -f ${IMAGE_NAME}-backend; else echo "${IMAGE_NAME}-backend container is not in the system"; fi
     - docker container prune -f
   script:
@@ -68,6 +69,7 @@ unit_test analytics-backend:
     - docker pull "bitnami/kafka:latest"
     - >
       docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      --env ALLOW_ANONYMOUS_LOGIN=yes
       bitnami/zookeeper:latest
     - sleep 10 # Wait for Zookeeper to start
     - >
@@ -75,7 +77,7 @@ unit_test analytics-backend:
       --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
       --env ALLOW_PLAINTEXT_LISTENER=yes
       bitnami/kafka:latest
-    - sleep 10 # Wait for Kafka to start
+    - sleep 20 # Wait for Kafka to start
     - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
     - echo $KAFKA_IP    
     - >
@@ -93,12 +95,12 @@ unit_test analytics-backend:
     - docker exec -i ${IMAGE_NAME}-backend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
+    - docker rm -f ${IMAGE_NAME}-backend
+    - docker rm -f kafka
+    - docker rm -f zookeeper
     - docker network rm teraflowbridge
     - docker volume prune --force
     - docker image prune --force
-    - docker rm -f ${IMAGE_NAME}-backend
-    - docker rm -f zookeeper
-    - docker rm -f kafka
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
     - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
@@ -150,19 +152,20 @@ unit_test analytics-frontend:
     - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
     - echo $CRDB_ADDRESS
     - >
-      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 \
-      -e ALLOW_ANONYMOUS_LOGIN=yes \
+      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      --env ALLOW_ANONYMOUS_LOGIN=yes
       bitnami/zookeeper:latest
     - sleep 10 # Wait for Zookeeper to start
-    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+    - >
+      docker run --name kafka -d --network=teraflowbridge -p 9092:9092
       --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
       --env ALLOW_PLAINTEXT_LISTENER=yes
       bitnami/kafka:latest
-    - sleep 10 # Wait for Kafka to start
+    - sleep 20 # Wait for Kafka to start
     - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
     - echo $KAFKA_IP
-    # - docker logs zookeeper
-    # - docker logs kafka
+    - docker logs zookeeper
+    - docker logs kafka
     - >
       docker run --name $IMAGE_NAME-frontend -d -p 30050:30050
       --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
@@ -179,13 +182,13 @@ unit_test analytics-frontend:
     - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
-    - docker volume rm -f crdb
-    - docker network rm teraflowbridge
-    - docker volume prune --force
-    - docker image prune --force
     - docker rm -f ${IMAGE_NAME}-frontend
     - docker rm -f zookeeper
     - docker rm -f kafka
+    - docker volume rm -f crdb
+    - docker volume prune --force
+    - docker image prune --force
+    - docker network rm teraflowbridge
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
     - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
@@ -200,4 +203,4 @@ unit_test analytics-frontend:
   artifacts:
       when: always
       reports:
-        junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml
\ No newline at end of file
+        junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml
diff --git a/src/analytics/database/Analyzer_DB.py b/src/analytics/database/Analyzer_DB.py
index 1ba68989a066e4638adc12e65289ed50b740731d..ab0b50f2ebba8e2590f1fcb4f2801f42a9c5d208 100644
--- a/src/analytics/database/Analyzer_DB.py
+++ b/src/analytics/database/Analyzer_DB.py
@@ -13,138 +13,44 @@
 # limitations under the License.
 
 import logging
-import sqlalchemy_utils
+from common.method_wrappers.Decorator import MetricsPool
+from common.tools.database.GenericDatabase import Database
+from common.method_wrappers.ServiceExceptions import OperationFailedException
 
-from sqlalchemy     import inspect, or_
-from sqlalchemy.orm import sessionmaker
+LOGGER       = logging.getLogger(__name__)
+METRICS_POOL = MetricsPool('Analyzer', 'Database')
 
-from analytics.database.AnalyzerModel         import Analyzer as AnalyzerModel
-from analytics.database.AnalyzerEngine        import AnalyzerEngine
-from common.method_wrappers.ServiceExceptions import (OperationFailedException, AlreadyExistsException)
+class AnalyzerDB(Database):
+    def __init__(self, model) -> None:
+        LOGGER.info('Init AnalyzerDB')
+        super().__init__(model)
 
-LOGGER = logging.getLogger(__name__)
-DB_NAME = "tfs_analyzer"        # TODO: export name from enviornment variable
-
-class AnalyzerDB:
-    def __init__(self):
-        self.db_engine = AnalyzerEngine.get_engine()
-        if self.db_engine is None:
-            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
-            return False
-        self.db_name = DB_NAME
-        self.Session = sessionmaker(bind=self.db_engine)
-
-    def create_database(self):
-        if not sqlalchemy_utils.database_exists(self.db_engine.url):
-            LOGGER.debug("Database created. {:}".format(self.db_engine.url))
-            sqlalchemy_utils.create_database(self.db_engine.url)
-
-    def drop_database(self) -> None:
-        if sqlalchemy_utils.database_exists(self.db_engine.url):
-            sqlalchemy_utils.drop_database(self.db_engine.url)
-
-    def create_tables(self):
-        try:
-            AnalyzerModel.metadata.create_all(self.db_engine)     # type: ignore
-            LOGGER.debug("Tables created in the database: {:}".format(self.db_name))
-        except Exception as e:
-            LOGGER.debug("Tables cannot be created in the database. {:s}".format(str(e)))
-            raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)])
-
-    def verify_tables(self):
-        try:
-            inspect_object = inspect(self.db_engine)
-            if(inspect_object.has_table('analyzer', None)):
-                LOGGER.info("Table exists in DB: {:}".format(self.db_name))
-        except Exception as e:
-            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
-
-# ----------------- CURD OPERATIONS ---------------------
-
-    def add_row_to_db(self, row):
-        session = self.Session()
-        try:
-            session.add(row)
-            session.commit()
-            LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.")
-            return True
-        except Exception as e:
-            session.rollback()
-            if "psycopg2.errors.UniqueViolation" in str(e):
-                LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}")
-                raise AlreadyExistsException(row.__class__.__name__, row,
-                                             extra_details=["Unique key voilation: {:}".format(e)] )
-            else:
-                LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
-                raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
-        finally:
-            session.close()
-    
-    def search_db_row_by_id(self, model, col_name, id_to_search):
-        session = self.Session()
-        try:
-            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
-            if entity:
-                # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}")
-                return entity
-            else:
-                LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}")
-                print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search))
-                return None
-        except Exception as e:
-            session.rollback()
-            LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}")
-            raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)])
-        finally:
-            session.close()
-    
-    def delete_db_row_by_id(self, model, col_name, id_to_search):
-        session = self.Session()
-        try:
-            record = session.query(model).filter_by(**{col_name: id_to_search}).first()
-            if record:
-                session.delete(record)
-                session.commit()
-                LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search)
-            else:
-                LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search)
-                return None
-        except Exception as e:
-            session.rollback()
-            LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e)
-            raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
-        finally:
-            session.close()
-    
     def select_with_filter(self, model, filter_object):
+        """
+        Generic method to create filters dynamically based on filter_object attributes.
+        params:     model:         SQLAlchemy model class to query.
+                    filter_object: Object that contains filtering criteria as attributes.
+        return:     SQLAlchemy session, query and Model
+        """
         session = self.Session()
         try:
-            query = session.query(AnalyzerModel)
-            
+            query = session.query(model)
             # Apply filters based on the filter_object
             if filter_object.analyzer_id:
-                query = query.filter(AnalyzerModel.analyzer_id.in_([a.analyzer_id.uuid for a in filter_object.analyzer_id]))
+                query = query.filter(model.analyzer_id.in_([a.analyzer_id.uuid for a in filter_object.analyzer_id]))
 
             if filter_object.algorithm_names:
-                query = query.filter(AnalyzerModel.algorithm_name.in_(filter_object.algorithm_names))
+                query = query.filter(model.algorithm_name.in_(filter_object.algorithm_names))
 
             if filter_object.input_kpi_ids:
                 input_kpi_uuids = [k.kpi_id.uuid for k in filter_object.input_kpi_ids]
-                query = query.filter(AnalyzerModel.input_kpi_ids.op('&&')(input_kpi_uuids))
+                query = query.filter(model.input_kpi_ids.op('&&')(input_kpi_uuids))
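+                # '&&' is the PostgreSQL/CockroachDB array-overlap operator: a row matches
+                # when its input_kpi_ids array shares at least one UUID with the given list.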
 
             if filter_object.output_kpi_ids:
                 output_kpi_uuids = [k.kpi_id.uuid for k in filter_object.output_kpi_ids]
-                query = query.filter(AnalyzerModel.output_kpi_ids.op('&&')(output_kpi_uuids))
-
-            result = query.all()
-            # query should be added to return all rows
-            if result:
-                LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") #  - Results: {result}
-            else:
-                LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filter_object}")
-            return result
+                query = query.filter(model.output_kpi_ids.op('&&')(output_kpi_uuids))
         except Exception as e:
-            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}")
-            raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)])
-        finally:
-            session.close()
+            LOGGER.error(f"Error creating filter of {model.__name__} table. ERROR: {e}")
+            raise OperationFailedException ("CreateKpiDescriptorFilter", extra_details=["unable to create the filter {:}".format(e)]) 
+        
+        return super().select_with_filter(query, session, model)
diff --git a/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py b/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py
index cde19e37869222d0f643bc33409f4f8711fb1c20..d48132de5d4b507411014b2e194c5fcd71b9bafa 100644
--- a/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py
+++ b/src/analytics/frontend/service/AnalyticsFrontendServiceServicerImpl.py
@@ -12,10 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, grpc, json
+
+import logging, grpc, json, queue
 
 from typing          import Dict
+from confluent_kafka import Consumer as KafkaConsumer
 from confluent_kafka import Producer as KafkaProducer
+from confluent_kafka import KafkaError
 
 from common.tools.kafka.Variables             import KafkaConfig, KafkaTopic
 from common.proto.context_pb2                 import Empty
@@ -24,7 +27,8 @@ from common.proto.analytics_frontend_pb2      import Analyzer, AnalyzerId, Analy
 from common.proto.analytics_frontend_pb2_grpc import AnalyticsFrontendServiceServicer
 from analytics.database.Analyzer_DB           import AnalyzerDB
 from analytics.database.AnalyzerModel         import Analyzer as AnalyzerModel
-
+from apscheduler.schedulers.background        import BackgroundScheduler
+from apscheduler.triggers.interval            import IntervalTrigger
 
 LOGGER           = logging.getLogger(__name__)
 METRICS_POOL     = MetricsPool('AnalyticsFrontend', 'NBIgRPC')
@@ -32,8 +36,14 @@ METRICS_POOL     = MetricsPool('AnalyticsFrontend', 'NBIgRPC')
 class AnalyticsFrontendServiceServicerImpl(AnalyticsFrontendServiceServicer):
     def __init__(self):
         LOGGER.info('Init AnalyticsFrontendService')
-        self.db_obj         = AnalyzerDB()
+        self.listener_topic = KafkaTopic.ANALYTICS_RESPONSE.value
+        self.db_obj         = AnalyzerDB(AnalyzerModel)
+        self.result_queue   = queue.Queue()
+        self.scheduler      = BackgroundScheduler()
         self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()})
+        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(),
+                                            'group.id'           : 'analytics-frontend',
+                                            'auto.offset.reset'  : 'latest'})
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def StartAnalyzer(self, 
@@ -46,6 +56,7 @@ class AnalyticsFrontendServiceServicerImpl(AnalyticsFrontendServiceServicer):
             AnalyzerModel.ConvertAnalyzerToRow(request)
         )
         self.PublishStartRequestOnKafka(request)
+
         response.analyzer_id.uuid = request.analyzer_id.analyzer_id.uuid
         return response
 
@@ -73,6 +84,62 @@ class AnalyticsFrontendServiceServicerImpl(AnalyticsFrontendServiceServicer):
         LOGGER.info("Analyzer Start Request Generated: Analyzer Id: {:}, Value: {:}".format(analyzer_uuid, analyzer_to_generate))
         self.kafka_producer.flush()
 
+
+    def StartResponseListener(self, filter_key=None):
+        """
+        Start the Kafka response listener with APScheduler and return key-value pairs periodically.
+        """
+        LOGGER.info("Starting StartResponseListener")
+        # Schedule the ResponseListener at fixed intervals
+        self.scheduler.add_job(
+            self.response_listener,
+            trigger=IntervalTrigger(seconds=5),
+            args=[filter_key], 
+            id=f"response_listener_{self.listener_topic}",
+            replace_existing=True
+        )
+        self.scheduler.start()
+        LOGGER.info(f"Started Kafka listener for topic {self.listener_topic}...")
+        try:
+            while True:
+                LOGGER.info("entering while...")
+                key, value = self.result_queue.get()  # Wait until a result is available
+                LOGGER.info("In while true ...")
+                yield key, value  # Yield the result to the calling function
+        except KeyboardInterrupt:
+            LOGGER.warning("Listener stopped manually.")
+        finally:
+            self.StopListener()
+
+    def response_listener(self, filter_key=None):
+        """
+        Poll Kafka messages and put key-value pairs into the queue.
+        """
+        LOGGER.info(f"Polling Kafka topic {self.listener_topic}...")
+
+        consumer = self.kafka_consumer
+        consumer.subscribe([self.listener_topic])
+        msg = consumer.poll(2.0)
+        if msg is None:
+            return
+        elif msg.error():
+            if msg.error().code() != KafkaError._PARTITION_EOF:
+                LOGGER.error(f"Kafka error: {msg.error()}")
+            return
+
+        try:
+            key = msg.key().decode('utf-8') if msg.key() else None
+            if filter_key is None or key == filter_key:  # accept any message when no filter is set
+                value = json.loads(msg.value().decode('utf-8'))
+                LOGGER.info(f"Received key: {key}, value: {value}")
+                self.result_queue.put((key, value))
+            else:
+                LOGGER.info(f"Skipping message with unmatched key: {key}")
+                # value = json.loads(msg.value().decode('utf-8')) # Added for debugging
+                # self.result_queue.put((filter_key, value))             # Added for debugging
+        except Exception as e:
+            LOGGER.error(f"Error processing Kafka message: {e}")
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def StopAnalyzer(self, 
                       request : AnalyzerId, grpc_context: grpc.ServicerContext # type: ignore
@@ -107,6 +174,15 @@ class AnalyticsFrontendServiceServicerImpl(AnalyticsFrontendServiceServicer):
         )
         LOGGER.info("Analyzer Stop Request Generated: Analyzer Id: {:}".format(analyzer_uuid))
         self.kafka_producer.flush()
+        self.StopListener()
+
+    def StopListener(self):
+        """
+        Gracefully stop the Kafka listener and the scheduler.
+        """
+        LOGGER.info("Stopping Kafka listener...")
+        self.scheduler.shutdown()
+        LOGGER.info("Kafka listener stopped.")
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SelectAnalyzers(self, 
@@ -126,6 +202,7 @@ class AnalyticsFrontendServiceServicerImpl(AnalyticsFrontendServiceServicer):
                 LOGGER.info('Unable to process filter response {:}'.format(e))
         except Exception as e:
             LOGGER.error('Unable to apply filter on table {:}. ERROR: {:}'.format(AnalyzerModel.__name__, e))
+
 
     def delivery_callback(self, err, msg):
         if err:
diff --git a/src/analytics/frontend/service/__main__.py b/src/analytics/frontend/service/__main__.py
index 6c331844f45d98095ef98951f3db43a0e2f0c69c..1df996785ec636592cf5197144d916a89257d9af 100644
--- a/src/analytics/frontend/service/__main__.py
+++ b/src/analytics/frontend/service/__main__.py
@@ -16,9 +16,11 @@ import logging, signal, sys, threading
 from prometheus_client import start_http_server
 from common.Settings import get_log_level, get_metrics_port
 from .AnalyticsFrontendService import AnalyticsFrontendService
+from analytics.database.AnalyzerModel import Analyzer as Model
+from common.tools.database.GenericDatabase import Database
 
 terminate = threading.Event()
-LOGGER = None
+LOGGER    = None
 
 def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
     LOGGER.warning('Terminate signal received')
@@ -36,6 +38,11 @@ def main():
 
     LOGGER.info('Starting...')
 
+    # Create the Analyzer database and tables if they do not already exist
+    analyzer_db_obj = Database(Model)
+    analyzer_db_obj.create_database()
+    analyzer_db_obj.create_tables()
+
     # Start metrics server
     metrics_port = get_metrics_port()
     start_http_server(metrics_port)
diff --git a/src/analytics/frontend/tests/test_frontend.py b/src/analytics/frontend/tests/test_frontend.py
index 1b4e0e14e687b454cdfdefe466dfd11e84bf245b..8903c7bf8cafd7f4f6e6bd4ebdf8f585fd8dc320 100644
--- a/src/analytics/frontend/tests/test_frontend.py
+++ b/src/analytics/frontend/tests/test_frontend.py
@@ -25,7 +25,7 @@ from common.Settings          import ( get_service_port_grpc, get_env_var_name,
                                       ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC )
 
 from common.tools.kafka.Variables                        import KafkaTopic
-from common.proto.kpi_value_api_pb2                      import KpiValue
+from common.proto.analytics_frontend_pb2                 import AnalyzerId, AnalyzerList
 from analytics.frontend.client.AnalyticsFrontendClient   import AnalyticsFrontendClient
 from analytics.frontend.service.AnalyticsFrontendService import AnalyticsFrontendService
 from analytics.frontend.tests.messages                   import ( create_analyzer_id, create_analyzer,
@@ -33,7 +33,7 @@ from analytics.frontend.tests.messages                   import ( create_analyze
 from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl
 from apscheduler.schedulers.background                   import BackgroundScheduler
 from apscheduler.triggers.interval                       import IntervalTrigger
-from common.proto.analytics_frontend_pb2                 import Analyzer, AnalyzerId, AnalyzerFilter, AnalyzerList
+
 
 ###########################
 # Tests Setup
@@ -84,23 +84,45 @@ def analyticsFrontend_client(analyticsFrontend_service : AnalyticsFrontendServic
 ###########################
 
 # --- "test_validate_kafka_topics" should be executed before the functionality tests ---
+# def test_validate_kafka_topics():
+#     LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+#     response = KafkaTopic.create_all_topics()
+#     assert isinstance(response, bool)
+
+# ----- core functionality test -----
+# def test_StartAnalytics(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_StartAnalytic START: <<< ')
+#     response = analyticsFrontend_client.StartAnalyzer(create_analyzer())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, AnalyzerId)
+
+# To test start and stop listener together
 def test_StartStopAnalyzers(analyticsFrontend_client):
-    LOGGER.info(' >>> test_StartAnalyzers START: <<< ')
+    LOGGER.info(' >>> test_StartStopAnalyzers START: <<< ')
+    LOGGER.info('--> StartAnalyzer')
     added_analyzer_id = analyticsFrontend_client.StartAnalyzer(create_analyzer())
     LOGGER.debug(str(added_analyzer_id))
-    assert isinstance(added_analyzer_id, AnalyzerId)
-
-def test_StopAnalytic(analyticsFrontend_client):
-    LOGGER.info(' >>> test_StopAnalytic START: <<< ')
-    response = analyticsFrontend_client.StopAnalyzer(create_analyzer_id())
+    LOGGER.info(' --> Calling StartResponseListener... ')
+    class_obj = AnalyticsFrontendServiceServicerImpl()
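+    # Note: StartResponseListener() is a generator; its body (and the scheduler it
+    # starts) only runs once the returned generator is iterated.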
+    response = class_obj.StartResponseListener(added_analyzer_id.analyzer_id.uuid)
+    LOGGER.debug(response)
+    LOGGER.info("waiting for timer to comlete ...")
+    time.sleep(3)
+    LOGGER.info('--> StopAnalyzer')
+    response = analyticsFrontend_client.StopAnalyzer(added_analyzer_id)
     LOGGER.debug(str(response))
-    assert isinstance(response, Empty)
 
-def test_SelectAnalytics(analyticsFrontend_client):
-    LOGGER.info(' >>> test_SelectAnalytics START: <<< ')
-    response = analyticsFrontend_client.SelectAnalyzers(create_analyzer_filter())
-    LOGGER.debug(str(response))
-    assert isinstance(response, AnalyzerList)
+# def test_SelectAnalytics(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_SelectAnalytics START: <<< ')
+#     response = analyticsFrontend_client.SelectAnalyzers(create_analyzer_filter())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, AnalyzerList)
+
+# def test_StopAnalytic(analyticsFrontend_client):
+#     LOGGER.info(' >>> test_StopAnalytic START: <<< ')
+#     response = analyticsFrontend_client.StopAnalyzer(create_analyzer_id())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, Empty)
 
 # def test_ResponseListener():
 #         LOGGER.info(' >>> test_ResponseListener START <<< ')
diff --git a/src/common/Constants.py b/src/common/Constants.py
index 2f3cef0a7e8351ffa7799bf405e90d816163f1ee..ae00f0b113ee586f150c59165349a8a5ded8d74e 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -62,6 +62,7 @@ class ServiceNameEnum(Enum):
     E2EORCHESTRATOR        = 'e2e-orchestrator'
     OPTICALCONTROLLER      = 'opticalcontroller'
     BGPLS                  = 'bgpls-speaker'
+    QKD_APP                = 'qkd_app'
     KPIMANAGER             = 'kpi-manager'
     KPIVALUEAPI            = 'kpi-value-api'
     KPIVALUEWRITER         = 'kpi-value-writer'
@@ -98,6 +99,7 @@ DEFAULT_SERVICE_GRPC_PORTS = {
     ServiceNameEnum.FORECASTER             .value : 10040,
     ServiceNameEnum.E2EORCHESTRATOR        .value : 10050,
     ServiceNameEnum.OPTICALCONTROLLER      .value : 10060,
+    ServiceNameEnum.QKD_APP                .value : 10070,
     ServiceNameEnum.BGPLS                  .value : 20030,
     ServiceNameEnum.KPIMANAGER             .value : 30010,
     ServiceNameEnum.KPIVALUEAPI            .value : 30020,
@@ -117,10 +119,12 @@ DEFAULT_SERVICE_HTTP_PORTS = {
     ServiceNameEnum.CONTEXT   .value : 8080,
     ServiceNameEnum.NBI       .value : 8080,
     ServiceNameEnum.WEBUI     .value : 8004,
+    ServiceNameEnum.QKD_APP   .value : 8005,
 }
 
 # Default HTTP/REST-API service base URLs
 DEFAULT_SERVICE_HTTP_BASEURLS = {
     ServiceNameEnum.NBI       .value : None,
     ServiceNameEnum.WEBUI     .value : None,
+    ServiceNameEnum.QKD_APP   .value : None,
 }
diff --git a/src/common/Settings.py b/src/common/Settings.py
index eaeb363adc1d9eadb9ddb0487abef8a0885ce380..13fcfc76966301599b0f5f39f2b188aea4e4d52a 100644
--- a/src/common/Settings.py
+++ b/src/common/Settings.py
@@ -79,12 +79,12 @@ def get_service_host(service_name : ServiceNameEnum):
 def get_service_port_grpc(service_name : ServiceNameEnum):
     envvar_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_PORT_GRPC)
     default_value = DEFAULT_SERVICE_GRPC_PORTS.get(service_name.value)
-    return get_setting(envvar_name, default=default_value)
+    return int(get_setting(envvar_name, default=default_value))
 
 def get_service_port_http(service_name : ServiceNameEnum):
     envvar_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_PORT_HTTP)
     default_value = DEFAULT_SERVICE_HTTP_PORTS.get(service_name.value)
-    return get_setting(envvar_name, default=default_value)
+    return int(get_setting(envvar_name, default=default_value))
 
 def get_service_baseurl_http(service_name : ServiceNameEnum):
     envvar_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_BASEURL_HTTP)
@@ -95,16 +95,34 @@ def get_log_level():
     return get_setting(ENVVAR_LOG_LEVEL, default=DEFAULT_LOG_LEVEL)
 
 def get_metrics_port():
-    return get_setting(ENVVAR_METRICS_PORT, default=DEFAULT_METRICS_PORT)
+    return int(get_setting(ENVVAR_METRICS_PORT, default=DEFAULT_METRICS_PORT))
 
 def get_grpc_bind_address():
     return get_setting(ENVVAR_GRPC_BIND_ADDRESS, default=DEFAULT_GRPC_BIND_ADDRESS)
 
 def get_grpc_max_workers():
-    return get_setting(ENVVAR_GRPC_MAX_WORKERS, default=DEFAULT_GRPC_MAX_WORKERS)
+    return int(get_setting(ENVVAR_GRPC_MAX_WORKERS, default=DEFAULT_GRPC_MAX_WORKERS))
 
 def get_grpc_grace_period():
-    return get_setting(ENVVAR_GRPC_GRACE_PERIOD, default=DEFAULT_GRPC_GRACE_PERIOD)
+    return int(get_setting(ENVVAR_GRPC_GRACE_PERIOD, default=DEFAULT_GRPC_GRACE_PERIOD))
 
 def get_http_bind_address():
     return get_setting(ENVVAR_HTTP_BIND_ADDRESS, default=DEFAULT_HTTP_BIND_ADDRESS)
+
+
+##### ----- Detect deployed microservices ----- #####
+
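+# A microservice is considered deployed when both its host and gRPC-port environment
+# variables (composed via get_env_var_name) are present in os.environ.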
+def is_microservice_deployed(service_name : ServiceNameEnum) -> bool:
+    host_env_var_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_HOST     )
+    port_env_var_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_PORT_GRPC)
+    return (host_env_var_name in os.environ) and (port_env_var_name in os.environ)
+
+def is_deployed_bgpls     () -> bool: return is_microservice_deployed(ServiceNameEnum.BGPLS            )
+def is_deployed_e2e_orch  () -> bool: return is_microservice_deployed(ServiceNameEnum.E2EORCHESTRATOR  )
+def is_deployed_forecaster() -> bool: return is_microservice_deployed(ServiceNameEnum.FORECASTER       )
+def is_deployed_load_gen  () -> bool: return is_microservice_deployed(ServiceNameEnum.LOAD_GENERATOR   )
+def is_deployed_optical   () -> bool: return is_microservice_deployed(ServiceNameEnum.OPTICALCONTROLLER)
+def is_deployed_policy    () -> bool: return is_microservice_deployed(ServiceNameEnum.POLICY           )
+def is_deployed_qkd_app   () -> bool: return is_microservice_deployed(ServiceNameEnum.QKD_APP          )
+def is_deployed_slice     () -> bool: return is_microservice_deployed(ServiceNameEnum.SLICE            )
+def is_deployed_te        () -> bool: return is_microservice_deployed(ServiceNameEnum.TE               )
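A minimal usage sketch combining the new deployment-detection helpers with the now-integer port getters (the chosen service and hostname are illustrative, not part of the patch):

```python
# Sketch only: skip optional integrations cleanly when the corresponding
# <SERVICE>_SERVICE_HOST / <SERVICE>_SERVICE_PORT_GRPC env vars are absent.
from common.Constants import ServiceNameEnum
from common.Settings import get_service_port_grpc, is_deployed_qkd_app

if is_deployed_qkd_app():
    port = get_service_port_grpc(ServiceNameEnum.QKD_APP)  # returns int (default 10070)
    endpoint = 'qkd-appservice:{:d}'.format(port)          # hostname illustrative
```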
diff --git a/src/kpi_manager/database/Kpi_DB.py b/src/common/tools/database/GenericDatabase.py
similarity index 58%
rename from src/kpi_manager/database/Kpi_DB.py
rename to src/common/tools/database/GenericDatabase.py
index 49ad9c9b579daa918818366a1d9505089968edc2..0cd41b9ef0c97263b56a5eda67b173f6ba61a997 100644
--- a/src/kpi_manager/database/Kpi_DB.py
+++ b/src/common/tools/database/GenericDatabase.py
@@ -12,52 +12,54 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+
 import logging
 import sqlalchemy_utils
+from .GenericEngine import Engine
+from sqlalchemy     import inspect
 from sqlalchemy.orm import sessionmaker
-from kpi_manager.database.KpiEngine import KpiEngine
-from kpi_manager.database.KpiModel import Kpi as KpiModel
-from common.method_wrappers.ServiceExceptions import ( 
-    AlreadyExistsException, OperationFailedException , NotFoundException)
+from common.Settings import get_setting
+
+from common.method_wrappers.ServiceExceptions import (OperationFailedException, AlreadyExistsException)
 
 LOGGER = logging.getLogger(__name__)
-DB_NAME = "tfs_kpi_mgmt"
 
-class KpiDB:
-    def __init__(self):
-        self.db_engine = KpiEngine.get_engine()
+class Database:
+    def __init__(self, model):
+        self.db_engine = Engine.get_engine()
         if self.db_engine is None:
             LOGGER.error('Unable to get SQLAlchemy DB Engine...')
-            return False
-        self.db_name = DB_NAME
-        self.Session = sessionmaker(bind=self.db_engine)
-
-    def create_database(self) -> None:
+            raise Exception('Failed to initialize the database engine.')
+        self.db_model = model
+        self.db_table = model.__name__
+        self.Session  = sessionmaker(bind=self.db_engine)
+
+    def create_database(self):
         if not sqlalchemy_utils.database_exists(self.db_engine.url):
             sqlalchemy_utils.create_database(self.db_engine.url)
             LOGGER.debug("Database created. {:}".format(self.db_engine.url))
 
     def drop_database(self) -> None:
         if sqlalchemy_utils.database_exists(self.db_engine.url):
             sqlalchemy_utils.drop_database(self.db_engine.url)
 
     def create_tables(self):
-        # TODO: use "get_tables(declatrative class obj)" method of "sqlalchemy_utils" to verify tables.
         try:
-            KpiModel.metadata.create_all(self.db_engine)     # type: ignore
-            LOGGER.debug("Tables created in the DB Name: {:}".format(self.db_name))
+            self.db_model.metadata.create_all(self.db_engine)
+            LOGGER.debug("Tables created in the database: {:}".format(self.db_table))
         except Exception as e:
-            LOGGER.debug("Tables cannot be created in the kpi database. {:s}".format(str(e)))
+            LOGGER.debug("Tables cannot be created in the database. {:s}".format(str(e)))
             raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)])
 
     def verify_tables(self):
         try:
-            with self.db_engine.connect() as connection:
-                result = connection.execute("SHOW TABLES;")
-                tables = result.fetchall()      # type: ignore
-                LOGGER.debug("Tables verified: {:}".format(tables))
+            inspect_object = inspect(self.db_engine)
+            if inspect_object.has_table(self.db_table):
+                LOGGER.info("Table exists in DB: {:}".format(self.db_table))
         except Exception as e:
-            LOGGER.debug("Unable to fetch Table names. {:s}".format(str(e)))
+            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
+
+# ----------------- DB OPERATIONS ---------------------
 
     def add_row_to_db(self, row):
         session = self.Session()
@@ -70,7 +72,8 @@ class KpiDB:
             session.rollback()
             if "psycopg2.errors.UniqueViolation" in str(e):
                 LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}")
-                raise AlreadyExistsException(row.__class__.__name__, row, extra_details=["Unique key voilation: {:}".format(e)] )
+                raise AlreadyExistsException(row.__class__.__name__, row,
+                                             extra_details=["Unique key violation: {:}".format(e)] )
             else:
                 LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
                 raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
@@ -89,6 +92,7 @@ class KpiDB:
                 print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search))
                 return None
         except Exception as e:
+            session.rollback()
             LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}")
             raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)])
         finally:
@@ -112,43 +116,24 @@ class KpiDB:
         finally:
             session.close()
 
-    def select_with_filter(self, model, filter_object):
-        session = self.Session()
+    def select_with_filter(self, query_object, session, model):
+        """
+        Generic method to apply filters dynamically based on filter.
+        params:     model_name:    SQLAlchemy model class name.
+                    query_object : Object that contains query with applied filters.
+                    session:       session of the query.
+        return:     List of filtered records.
+        """
         try:
-            query = session.query(KpiModel)
-            # Apply filters based on the filter_object
-            if filter_object.kpi_id:
-                query = query.filter(KpiModel.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id]))
-
-            if filter_object.kpi_sample_type:
-                query = query.filter(KpiModel.kpi_sample_type.in_(filter_object.kpi_sample_type))
-
-            if filter_object.device_id:
-                query = query.filter(KpiModel.device_id.in_([d.device_uuid.uuid for d in filter_object.device_id]))
-
-            if filter_object.endpoint_id:
-                query = query.filter(KpiModel.endpoint_id.in_([e.endpoint_uuid.uuid for e in filter_object.endpoint_id]))
-
-            if filter_object.service_id:
-                query = query.filter(KpiModel.service_id.in_([s.service_uuid.uuid for s in filter_object.service_id]))
-
-            if filter_object.slice_id:
-                query = query.filter(KpiModel.slice_id.in_([s.slice_uuid.uuid for s in filter_object.slice_id]))
-
-            if filter_object.connection_id:
-                query = query.filter(KpiModel.connection_id.in_([c.connection_uuid.uuid for c in filter_object.connection_id]))
-
-            if filter_object.link_id:
-                query = query.filter(KpiModel.link_id.in_([l.link_uuid.uuid for l in filter_object.link_id]))
-            result = query.all()
-            
+            result = query_object.all()
+            # Log result and handle empty case
             if result:
-                LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") #  - Results: {result}
+                LOGGER.debug(f"Fetched filtered rows from {model.__name__} with filters: {query_object}")
             else:
-                LOGGER.debug(f"No matching row found in {model.__name__} table with filters: {filter_object}")
+                LOGGER.warning(f"No matching rows found in {model.__name__} with filters: {query_object}")
             return result
         except Exception as e:
-            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}")
-            raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)])
+            LOGGER.error(f"Error fetching filtered rows from {model.__name__} with filters {query_object} ::: {e}")
+            raise OperationFailedException("Select by filter", extra_details=[f"Unable to apply the filter: {e}"])
         finally:
             session.close()
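For reference, a sketch of the new contract under a hypothetical `Item` model: the caller builds the filtered query, and `select_with_filter` only executes it, logs, and closes the session.

```python
from sqlalchemy import Column, String
from sqlalchemy.orm import declarative_base
from common.tools.database.GenericDatabase import Database

Base = declarative_base()

class Item(Base):  # hypothetical model, for illustration only
    __tablename__ = 'item'
    item_id = Column(String, primary_key=True)

db = Database(Item)
db.create_database()
db.create_tables()
session = db.Session()
query   = session.query(Item).filter(Item.item_id.in_(['a', 'b']))
rows    = db.select_with_filter(query, session, Item)  # closes the session
```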
diff --git a/src/analytics/database/AnalyzerEngine.py b/src/common/tools/database/GenericEngine.py
similarity index 92%
rename from src/analytics/database/AnalyzerEngine.py
rename to src/common/tools/database/GenericEngine.py
index 9294e09966ef9e13c9cfa3cab590e5d0c8b6a80e..18bb15360853524ed93606f3137972aa76aa850a 100644
--- a/src/analytics/database/AnalyzerEngine.py
+++ b/src/common/tools/database/GenericEngine.py
@@ -18,14 +18,14 @@ from common.Settings import get_setting
 LOGGER = logging.getLogger(__name__)
 CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
 
-class AnalyzerEngine:
+class Engine:
     @staticmethod
     def get_engine() -> sqlalchemy.engine.Engine:
         crdb_uri = get_setting('CRDB_URI', default=None)
         if crdb_uri is None:
             CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE')
             CRDB_SQL_PORT  = get_setting('CRDB_SQL_PORT')
-            CRDB_DATABASE  = "tfs-analyzer"             # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT')
+            CRDB_DATABASE  = get_setting('CRDB_DATABASE')
             CRDB_USERNAME  = get_setting('CRDB_USERNAME')
             CRDB_PASSWORD  = get_setting('CRDB_PASSWORD')
             CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
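With `CRDB_DATABASE` now read from the environment instead of being hard-coded, each component can target its own database; a sketch with illustrative values (in-cluster deployments inject these via Kubernetes secrets):

```python
import os
os.environ.setdefault('CRDB_NAMESPACE', 'crdb')
os.environ.setdefault('CRDB_SQL_PORT',  '26257')
os.environ.setdefault('CRDB_DATABASE',  'tfs_kpi_mgmt')
os.environ.setdefault('CRDB_USERNAME',  'tfs')
os.environ.setdefault('CRDB_PASSWORD',  'tfs123')
os.environ.setdefault('CRDB_SSLMODE',   'require')

from common.tools.database.GenericEngine import Engine
engine = Engine.get_engine()  # builds the cockroachdb:// URI from the vars above
```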
diff --git a/src/common/tools/database/__init__.py b/src/common/tools/database/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/common/tools/database/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py
index f2f54bbd70eae2722f29e254495dddedadf2617b..2fcc5b63a66ae3c3e96ab774d34958a110f0454a 100644
--- a/src/common/tools/descriptor/Loader.py
+++ b/src/common/tools/descriptor/Loader.py
@@ -33,20 +33,25 @@
 #    # do test ...
 #    descriptor_loader.unload()
 
-import concurrent.futures, json, logging, operator
+import concurrent.futures, copy, json, logging, operator
 from typing import Any, Dict, List, Optional, Tuple, Union
 from common.proto.context_pb2 import (
-    Connection, Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Service, ServiceId, Slice, SliceId,
-    Topology, TopologyId)
+    Connection, Context, ContextId, Device, DeviceId, Empty,
+    Link, LinkId, Service, ServiceId, Slice, SliceId,
+    Topology, TopologyId
+)
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
 from .Tools import (
-    format_device_custom_config_rules, format_service_custom_config_rules, format_slice_custom_config_rules,
-    get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices,
-    get_descriptors_add_topologies, split_controllers_and_network_devices, split_devices_by_rules)
+    format_device_custom_config_rules, format_service_custom_config_rules,
+    format_slice_custom_config_rules, get_descriptors_add_contexts,
+    get_descriptors_add_services, get_descriptors_add_slices,
+    get_descriptors_add_topologies, split_controllers_and_network_devices,
+    split_devices_by_rules
+)
 
 LOGGER = logging.getLogger(__name__)
 LOGGERS = {
@@ -78,6 +83,30 @@ TypeResults = List[Tuple[str, str, int, List[str]]] # entity_name, action, num_o
 TypeNotification = Tuple[str, str] # message, level
 TypeNotificationList = List[TypeNotification]
 
+SLICE_TEMPLATE = {
+    "slice_id": {
+        "context_id": {"context_uuid": {"uuid": "admin"}},
+        "slice_uuid": {"uuid": None}
+    },
+    "name": {},
+    "slice_config": {"config_rules": [
+        {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {
+            "address_families": ["IPV4"], "bgp_as": 65000,
+            "bgp_route_target": "65000:333", "mtu": 1512
+        }}}
+    ]},
+    "slice_constraints": [
+        {"sla_capacity": {"capacity_gbps": 20.0}},
+        {"sla_availability": {"availability": 20.0, "num_disjoint_paths": 1, "all_active": True}},
+        {"sla_isolation": {"isolation_level": [0]}}
+    ],
+    "slice_endpoint_ids": [
+
+    ],
+    "slice_status": {"slice_status": 1}
+}
+
+
 class DescriptorLoader:
     def __init__(
         self, descriptors : Optional[Union[str, Dict]] = None, descriptors_file : Optional[str] = None,
@@ -106,8 +135,53 @@ class DescriptorLoader:
         self.__links       = self.__descriptors.get('links'      , [])
         self.__services    = self.__descriptors.get('services'   , [])
         self.__slices      = self.__descriptors.get('slices'     , [])
+        self.__ietf_slices = self.__descriptors.get('ietf-network-slice-service:network-slice-services', {})
         self.__connections = self.__descriptors.get('connections', [])
 
+        if len(self.__ietf_slices) > 0:
+            for slice_service in self.__ietf_slices.get("slice-service", []):
+                tfs_slice = copy.deepcopy(SLICE_TEMPLATE)
+                tfs_slice["slice_id"]["slice_uuid"]["uuid"] = slice_service["id"]
+                tfs_slice["name"] = slice_service["description"]
+                for sdp in slice_service["sdps"]["sdp"]:
+                    sdp_id = sdp["id"]
+                    for attcircuit in sdp["attachment-circuits"]["attachment-circuit"]:
+                        att_cir_tp_id = attcircuit["ac-tp-id"]
+                        RESOURCE_KEY = "/device[{:s}]/endpoint[{:s}]/settings"
+                        resource_key = RESOURCE_KEY.format(str(sdp_id), str(att_cir_tp_id))
+
+                        vlan_id = 0
+                        for tag in attcircuit['ac-tags']['ac-tag']:
+                            if tag.get('tag-type') == 'ietf-nss:vlan-id':
+                                vlan_id = tag.get('value')
+                                break
+
+                        tfs_slice["slice_config"]["config_rules"].append({
+                            "action": 1, "custom": {
+                                "resource_key": resource_key, "resource_value": {
+                                    "router_id": sdp.get("node-id",[]),
+                                    "sub_interface_index": 0,
+                                    "vlan_id": vlan_id
+                                }
+                            }
+                        })
+                        tfs_slice["slice_endpoint_ids"].append({
+                            "device_id": {"device_uuid": {"uuid": sdp_id}},
+                            "endpoint_uuid": {"uuid": att_cir_tp_id},
+                            "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, 
+                            "topology_uuid": {"uuid": "admin"}}
+                        })
+                        #tfs_slice["slice_constraints"].append({
+                        #    "endpoint_location": {
+                        #        "endpoint_id": {
+                        #            "device_id": {"device_uuid": {"uuid": sdp["id"]}},
+                        #            "endpoint_uuid": {"uuid": attcircuit["ac-tp-id"]}
+                        #        },
+                        #        "location": {"region": "4"}
+                        #    }
+                        #})
+                self.__slices.append(tfs_slice)
+
         self.__contexts_add   = None
         self.__topologies_add = None
         self.__devices_add    = None
@@ -232,7 +306,9 @@ class DescriptorLoader:
 
     def _load_dummy_mode(self) -> None:
         # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks.
+
         controllers, network_devices = split_controllers_and_network_devices(self.__devices)
+
         self.__ctx_cli.connect()
         self._process_descr('context',    'add',    self.__ctx_cli.SetContext,    Context,    self.__contexts_add  )
         self._process_descr('topology',   'add',    self.__ctx_cli.SetTopology,   Topology,   self.__topologies_add)
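A minimal, hypothetical descriptor exercising the new IETF network-slice path; the keys mirror those read by the loader above:

```python
descriptors = {
    "ietf-network-slice-service:network-slice-services": {
        "slice-service": [{
            "id": "slice-1",
            "description": "example slice",
            "sdps": {"sdp": [{
                "id": "R1",
                "node-id": "10.0.0.1",
                "attachment-circuits": {"attachment-circuit": [{
                    "ac-tp-id": "1/1",
                    "ac-tags": {"ac-tag": [
                        {"tag-type": "ietf-nss:vlan-id", "value": 100}
                    ]}
                }]}
            }]}
        }]
    }
}
# DescriptorLoader(descriptors=descriptors) appends one TFS slice built from
# SLICE_TEMPLATE: one /device[R1]/endpoint[1/1]/settings rule with vlan_id=100
# plus the matching slice_endpoint_ids entry.
```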
diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py
index 9e432d637e70236d192d5248247175ef310d8368..8ff6447f7784fd9e6846c1b73bc176b0a49f25e1 100644
--- a/src/common/tools/kafka/Variables.py
+++ b/src/common/tools/kafka/Variables.py
@@ -30,7 +30,6 @@ class KafkaConfig(Enum):
             KFK_NAMESPACE        = get_setting('KFK_NAMESPACE')
             KFK_PORT             = get_setting('KFK_SERVER_PORT')
             kafka_server_address = KFK_SERVER_ADDRESS_TEMPLATE.format(KFK_NAMESPACE, KFK_PORT)
-        # kafka_server_address = "127.0.0.1:9092"
         return kafka_server_address
         
     @staticmethod
@@ -79,8 +78,8 @@ class KafkaTopic(Enum):
                 # LOGGER.debug("Existing topic list: {:}".format(topic_metadata.topics))
                 if topic not in topic_metadata.topics:
                     # If the topic does not exist, create a new topic
-                    # print("Topic {:} does not exist. Creating...".format(topic))
-                    # LOGGER.debug("Topic {:} does not exist. Creating...".format(topic))
+                    print("Topic {:} does not exist. Creating...".format(topic))
+                    LOGGER.debug("Topic {:} does not exist. Creating...".format(topic))
                     new_topic = NewTopic(topic, num_partitions=1, replication_factor=1)
                     KafkaConfig.get_admin_client().create_topics([new_topic])
                 else:
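The creation loop above relies on the confluent-kafka admin API; a standalone sketch of the same pattern (server address and topic name illustrative):

```python
from confluent_kafka.admin import AdminClient, NewTopic

admin = AdminClient({'bootstrap.servers': 'kafka-service.kafka.svc.cluster.local:9092'})
metadata = admin.list_topics(timeout=5)
if 'example-topic' not in metadata.topics:
    futures = admin.create_topics([NewTopic('example-topic', num_partitions=1, replication_factor=1)])
    futures['example-topic'].result()  # blocks; raises KafkaException on failure
```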
diff --git a/src/common/tools/object_factory/QKDApp.py b/src/common/tools/object_factory/QKDApp.py
new file mode 100644
index 0000000000000000000000000000000000000000..936a0f228302469ec51321623f8d8050f3daced0
--- /dev/null
+++ b/src/common/tools/object_factory/QKDApp.py
@@ -0,0 +1,22 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+from typing import Dict, Optional
+
+
+def json_app_id(app_uuid : str, context_id : Optional[Dict] = None) -> Dict:
+    result = {'app_uuid': {'uuid': app_uuid}}
+    if context_id is not None: result['context_id'] = copy.deepcopy(context_id)
+    return result
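Expected shape of the factory output (UUIDs illustrative):

```python
from common.tools.object_factory.Context import json_context_id
from common.tools.object_factory.QKDApp import json_app_id

app_id = json_app_id('my-app', context_id=json_context_id('admin'))
# {'app_uuid': {'uuid': 'my-app'},
#  'context_id': {'context_uuid': {'uuid': 'admin'}}}
```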
diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py
index 32b99a31f22072874ab894de2a87ce2b7d56ba85..b05821c7814ce250abca1819b111376af7c0430f 100644
--- a/src/common/tools/object_factory/Service.py
+++ b/src/common/tools/object_factory/Service.py
@@ -42,6 +42,16 @@ def json_service(
         'service_config'      : {'config_rules': copy.deepcopy(config_rules)},
     }
 
+def json_service_qkd_planned(
+        service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
+        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME
+    ):
+
+    return json_service(
+        service_uuid, ServiceTypeEnum.SERVICETYPE_QKD, context_id=json_context_id(context_uuid),
+        status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints,
+        config_rules=config_rules)
+
 def json_service_l2nm_planned(
         service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
         config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME
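A hedged usage sketch of the new QKD factory (device and endpoint names are illustrative; `json_device_id`/`json_endpoint_id` are the existing object-factory helpers):

```python
from common.tools.object_factory.Device import json_device_id
from common.tools.object_factory.EndPoint import json_endpoint_id
from common.tools.object_factory.Service import json_service_qkd_planned

service = json_service_qkd_planned('qkd-svc-1', endpoint_ids=[
    json_endpoint_id(json_device_id('QKD1'), '10.0.2.10:1001'),
    json_endpoint_id(json_device_id('QKD3'), '10.0.2.10:3001'),
])
# -> a SERVICETYPE_QKD service in SERVICESTATUS_PLANNED under context 'admin'
```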
diff --git a/src/device/.gitlab-ci.yml b/src/device/.gitlab-ci.yml
index 9106c96a8bed2cea406dd150fe656927311958cf..3ae6b2b20d22d0a91d10b61acd20fd0d2d4a28ac 100644
--- a/src/device/.gitlab-ci.yml
+++ b/src/device/.gitlab-ci.yml
@@ -38,6 +38,30 @@ build device:
       - manifests/${IMAGE_NAME}service.yaml
       - .gitlab-ci.yml
 
+## Start Mock QKD Nodes before unit testing
+#start_mock_nodes:
+#  stage: deploy
+#  script:
+#    - bash src/tests/tools/mock_qkd_nodes/start.sh &
+#    - sleep 10 # wait for nodes to spin up
+#  artifacts:
+#    paths:
+#      - mock_nodes.log
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+
+## Prepare Scenario (Start NBI, mock services)
+#prepare_scenario:
+#  stage: deploy
+#  script:
+#    - pytest src/tests/qkd/unit/PrepareScenario.py
+#  needs:
+#    - start_mock_nodes
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+
 # Apply unit test to the component
 unit_test device:
   variables:
@@ -46,6 +70,8 @@ unit_test device:
   stage: unit_test
   needs:
     - build device
+    #- start_mock_nodes
+    #- prepare_scenario
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
     - >
@@ -68,6 +94,7 @@ unit_test device:
     - docker logs $IMAGE_NAME
     - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary_emulated.py --junitxml=/opt/results/${IMAGE_NAME}_report_emulated.xml"
     - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary_ietf_actn.py --junitxml=/opt/results/${IMAGE_NAME}_report_ietf_actn.xml"
+    #- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/qkd/unit/test_*.py"
     - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
diff --git a/src/device/tests/qkd/unit/descriptorQKD_links.json b/src/device/tests/qkd/unit/descriptorQKD_links.json
new file mode 100644
index 0000000000000000000000000000000000000000..28a9e7d5ae014f78cfa0e554ee73a53449bba03c
--- /dev/null
+++ b/src/device/tests/qkd/unit/descriptorQKD_links.json
@@ -0,0 +1,77 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "QKD1"}}, "device_type": "qkd-node",
+            "device_operational_status": 0, "device_drivers": [12], "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "11111"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "scheme": "http"
+                }}}
+            ]}
+
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "QKD2"}}, "device_type": "qkd-node",
+            "device_operational_status": 0, "device_drivers": [12], "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "22222"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "scheme": "http"
+                }}}
+            ]}
+
+        },
+	{
+            "device_id": {"device_uuid": {"uuid": "QKD3"}}, "device_type": "qkd-node",
+            "device_operational_status": 0, "device_drivers": [12], "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "33333"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "scheme": "http"
+                }}}
+            ]}
+
+        }
+    ],
+    "links": [
+	{
+            "link_id": {"link_uuid": {"uuid": "QKD1/10.0.2.10:1001==QKD2/10.0.2.10:2001"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "10.0.2.10:1001"}},
+                {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2001"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "QKD2/10.0.2.10:2001==QKD1/10.0.2.10:1001"}},
+            "link_endpoint_ids": [
+		        {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2001"}},
+                {"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "10.0.2.10:1001"}}
+            ]
+        },
+	{
+            "link_id": {"link_uuid": {"uuid": "QKD2/10.0.2.10:2002==QKD3/10.0.2.10:3001"}},
+            "link_endpoint_ids": [
+		        {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2002"}},
+                {"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "10.0.2.10:3001"}}
+            ]
+        },
+	{
+            "link_id": {"link_uuid": {"uuid": "QKD3/10.0.2.10:3001==QKD2/10.0.2.10:2002"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "10.0.2.10:3001"}},
+                {"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2002"}}
+            ]
+        }
+
+    ]
+}
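A sketch of how a test might load this descriptor (method names follow the loader's documented usage; the file path is relative to the repository root):

```python
from common.tools.descriptor.Loader import DescriptorLoader

descriptor_loader = DescriptorLoader(
    descriptors_file='src/device/tests/qkd/unit/descriptorQKD_links.json')
results = descriptor_loader.process_descriptors()  # contexts, topologies, devices, links
# ... assert on the three QKD nodes and the four unidirectional links ...
descriptor_loader.unload()
```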
diff --git a/src/device/tests/qkd/unit/test_application_deployment.py b/src/device/tests/qkd/unit/test_application_deployment.py
index 92e16663b41556563aab884be2ee48518cd15ff7..d10ddc523062e1377c7621edfdd42b57c32bad56 100644
--- a/src/device/tests/qkd/unit/test_application_deployment.py
+++ b/src/device/tests/qkd/unit/test_application_deployment.py
@@ -14,9 +14,11 @@
 
 import pytest
 import json
+import os
+os.environ['DEVICE_EMULATED_ONLY'] = 'YES'
 from device.service.drivers.qkd.QKDDriver2 import QKDDriver
 
-MOCK_QKD_ADDRRESS = '127.0.0.1'
+MOCK_QKD_ADDRRESS = '10.0.2.10'
 MOCK_PORT = 11111
 
 @pytest.fixture
diff --git a/src/device/tests/qkd/unit/test_create_apps.py b/src/device/tests/qkd/unit/test_create_apps.py
new file mode 100644
index 0000000000000000000000000000000000000000..4724e5c4abd04e28586d54ae6dbbbf0be3c7dc5c
--- /dev/null
+++ b/src/device/tests/qkd/unit/test_create_apps.py
@@ -0,0 +1,43 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests
+
+QKD_ADDRESS = '10.0.2.10'
+QKD_URL     = 'http://{:s}/qkd_app/create_qkd_app'.format(QKD_ADDRESS)
+
+QKD_REQUEST_1 = {
+    'app': {
+        'server_app_id': '1',
+        'client_app_id': [],
+        'app_status': 'ON',
+        'local_qkdn_id': '00000001-0000-0000-0000-0000000000',
+        'backing_qkdl_id': ['00000003-0002-0000-0000-0000000000']
+    }
+}
+
+QKD_REQUEST_2 = {
+    'app': {
+        'server_app_id': '1',
+        'client_app_id': [],
+        'app_status': 'ON',
+        'local_qkdn_id': '00000003-0000-0000-0000-0000000000',
+        'backing_qkdl_id': ['00000003-0002-0000-0000-0000000000']
+    }
+}
+
+if __name__ == '__main__':
+    # Guard the HTTP calls so pytest collection does not trigger them
+    print(requests.post(QKD_URL, json=QKD_REQUEST_1))
+    print(requests.post(QKD_URL, json=QKD_REQUEST_2))
diff --git a/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py b/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py
index 150d00fd079b0a036f383653c833562279bb4d72..be9427d9b619423a61a3e6f5270d8aab76dc8955 100644
--- a/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py
+++ b/src/device/tests/qkd/unit/test_qkd_mock_connectivity.py
@@ -38,3 +38,4 @@ def test_qkd_driver_timeout_connection(mock_get, qkd_driver):
     mock_get.side_effect = requests.exceptions.Timeout
     qkd_driver.timeout = 0.001  # Simulate very short timeout
     assert qkd_driver.Connect() is False
+
diff --git a/src/kpi_manager/database/KpiDB.py b/src/kpi_manager/database/KpiDB.py
new file mode 100644
index 0000000000000000000000000000000000000000..d503f06f4cdeb57efd4c02701803f81fd31d3eea
--- /dev/null
+++ b/src/kpi_manager/database/KpiDB.py
@@ -0,0 +1,66 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.method_wrappers.Decorator import MetricsPool
+from common.tools.database.GenericDatabase import Database
+from common.method_wrappers.ServiceExceptions import OperationFailedException
+
+LOGGER       = logging.getLogger(__name__)
+METRICS_POOL = MetricsPool('KpiManager', 'Database')
+
+class KpiDB(Database):
+    def __init__(self, model) -> None:
+        LOGGER.info('Init KpiDB')
+        super().__init__(model)
+
+    def select_with_filter(self, model, filter_object):
+        """
+        Generic method to create filters dynamically based on filter_object attributes.
+        params:     model:         SQLAlchemy model class to query.
+                    filter_object: Object that contains filtering criteria as attributes.
+        return:     List of filtered records (execution is delegated to the generic Database).
+        """
+        session = self.Session()
+        try:
+            query = session.query(model)
+            # Apply filters based on the filter_object
+            if filter_object.kpi_id:
+                query = query.filter(model.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id]))
+
+            if filter_object.kpi_sample_type:
+                query = query.filter(model.kpi_sample_type.in_(filter_object.kpi_sample_type))
+
+            if filter_object.device_id:
+                query = query.filter(model.device_id.in_([d.device_uuid.uuid for d in filter_object.device_id]))
+
+            if filter_object.endpoint_id:
+                query = query.filter(model.endpoint_id.in_([e.endpoint_uuid.uuid for e in filter_object.endpoint_id]))
+
+            if filter_object.service_id:
+                query = query.filter(model.service_id.in_([s.service_uuid.uuid for s in filter_object.service_id]))
+
+            if filter_object.slice_id:
+                query = query.filter(model.slice_id.in_([s.slice_uuid.uuid for s in filter_object.slice_id]))
+
+            if filter_object.connection_id:
+                query = query.filter(model.connection_id.in_([c.connection_uuid.uuid for c in filter_object.connection_id]))
+
+            if filter_object.link_id:
+                query = query.filter(model.link_id.in_([l.link_uuid.uuid for l in filter_object.link_id]))
+        except Exception as e:
+            LOGGER.error(f"Error creating filter of {model.__name__} table. ERROR: {e}")
+            raise OperationFailedException ("CreateKpiDescriptorFilter", extra_details=["unable to create the filter {:}".format(e)]) 
+        
+        return super().select_with_filter(query, session, model)
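A sketch of the resulting call path with a gRPC filter object (the sample-type value is illustrative):

```python
from common.proto.kpi_manager_pb2 import KpiDescriptorFilter
from kpi_manager.database.KpiDB import KpiDB
from kpi_manager.database.KpiModel import Kpi as KpiModel

kpi_db  = KpiDB(KpiModel)
_filter = KpiDescriptorFilter()
_filter.kpi_sample_type.append(101)  # e.g. KPISAMPLETYPE_PACKETS_TRANSMITTED
rows = kpi_db.select_with_filter(KpiModel, _filter)  # executed by the generic Database
```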
diff --git a/src/kpi_manager/service/KpiManagerServiceServicerImpl.py b/src/kpi_manager/service/KpiManagerServiceServicerImpl.py
index fd22474829ea0dfb6b1a25e70bbb4d5440c0216b..3f9ae8492380e5e11cd3cbc926a2fce07620d8a7 100644
--- a/src/kpi_manager/service/KpiManagerServiceServicerImpl.py
+++ b/src/kpi_manager/service/KpiManagerServiceServicerImpl.py
@@ -18,7 +18,8 @@ from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_m
 from common.proto.context_pb2 import Empty
 from common.proto.kpi_manager_pb2_grpc import KpiManagerServiceServicer
 from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList
-from kpi_manager.database.Kpi_DB import KpiDB
+# from kpi_manager.database.Kpi_DB import KpiDB
+from kpi_manager.database.KpiDB import KpiDB
 from kpi_manager.database.KpiModel import Kpi as KpiModel
 
 LOGGER = logging.getLogger(__name__)
@@ -27,7 +28,7 @@ METRICS_POOL = MetricsPool('KpiManager', 'NBIgRPC')
 class KpiManagerServiceServicerImpl(KpiManagerServiceServicer):
     def __init__(self):
         LOGGER.info('Init KpiManagerService')
-        self.kpi_db_obj = KpiDB()
+        self.kpi_db_obj = KpiDB(KpiModel)
     
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def SetKpiDescriptor(self, request: KpiDescriptor, grpc_context: grpc.ServicerContext # type: ignore
diff --git a/src/kpi_manager/service/__main__.py b/src/kpi_manager/service/__main__.py
index 244d5afa373a6462a0382a0ed26a588088a689a1..05e32bb58128975ea5d2a5f015d1e8b3977c9905 100644
--- a/src/kpi_manager/service/__main__.py
+++ b/src/kpi_manager/service/__main__.py
@@ -16,8 +16,11 @@ import logging, signal, sys, threading
 from common.Settings import get_log_level
 from .KpiManagerService import KpiManagerService
 
+from kpi_manager.database.KpiModel import Kpi as Model
+from common.tools.database.GenericDatabase import Database
+
 terminate = threading.Event()
-LOGGER = None
+LOGGER    = None
 
 def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
     LOGGER.warning('Terminate signal received')
@@ -35,6 +38,11 @@ def main():
 
     LOGGER.debug('Starting...')
 
+    # Create the database and tables if they do not already exist
+    kpiDBobj = Database(Model)
+    kpiDBobj.create_database()
+    kpiDBobj.create_tables()
+
     grpc_service = KpiManagerService()
     grpc_service.start()
 
diff --git a/src/kpi_manager/tests/__init__.py b/src/kpi_manager/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_manager/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_manager/tests/test_kpi_db.py b/src/kpi_manager/tests/test_kpi_db.py
index d4a57f83664f851504389b3bbe99d5c2a92542d9..44c6df6a93b023c218a067f2131b7482347fdf1e 100644
--- a/src/kpi_manager/tests/test_kpi_db.py
+++ b/src/kpi_manager/tests/test_kpi_db.py
@@ -14,15 +14,38 @@
 
 
 import logging
-from kpi_manager.database.Kpi_DB import KpiDB
+#from common.proto.kpi_manager_pb2 import KpiDescriptorList
+#from .test_messages import create_kpi_filter_request
+from kpi_manager.database.KpiDB import KpiDB
+from kpi_manager.database.KpiModel import Kpi as KpiModel
+# from common.tools.database.GenericDatabase import Database
 
 LOGGER = logging.getLogger(__name__)
 
 def test_verify_databases_and_Tables():
     LOGGER.info('>>> test_verify_Tables : START <<< ')
-    kpiDBobj = KpiDB()
+    kpiDBobj = KpiDB(KpiModel)
     # kpiDBobj.drop_database()
     # kpiDBobj.verify_tables()
     kpiDBobj.create_database()
     kpiDBobj.create_tables()
     kpiDBobj.verify_tables()
+
+# def test_generic_DB_select_method():
+#     LOGGER.info("--> STARTED-test_generic_DB_select_method")
+#     kpi_obj  = KpiDB()
+#     _filter  = create_kpi_filter_request()
+#     # response = KpiDescriptorList()
+#     try:
+#          kpi_obj.select_with_filter(KpiModel, _filter)
+#     except Exception as e:
+#         LOGGER.error('Unable to apply filter on kpi descriptor. {:}'.format(e))
+#     LOGGER.info("--> FINISHED-test_generic_DB_select_method")
+#     # try:
+#     #     for row in rows:
+#     #         kpiDescriptor_obj = KpiModel.convert_row_to_KpiDescriptor(row)
+#     #         response.kpi_descriptor_list.append(kpiDescriptor_obj)
+#     #     return response
+#     # except Exception as e:
+#     #     LOGGER.info('Unable to process filter response {:}'.format(e))
+#     # assert isinstance(r)
diff --git a/src/kpi_manager/tests/test_kpi_manager.py b/src/kpi_manager/tests/test_kpi_manager.py
index 219fdadee9e2f4ca9ea9ac0be040043d4edfbdbe..06e836b70963768b375ab04e29a640591b283108 100755
--- a/src/kpi_manager/tests/test_kpi_manager.py
+++ b/src/kpi_manager/tests/test_kpi_manager.py
@@ -139,9 +139,9 @@ def test_SelectKpiDescriptor(kpi_manager_client):
     LOGGER.info("Response gRPC message object: {:}".format(response))
     assert isinstance(response, KpiDescriptorList)
 
-def test_set_list_of_KPIs(kpi_manager_client):
-    LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ")
-    KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"]
-    # adding KPI
-    for kpi in KPIs_TO_SEARCH:
-       kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi))
+# def test_set_list_of_KPIs(kpi_manager_client):
+#     LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ")
+#     KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"]
+#     # adding KPI
+#     for kpi in KPIs_TO_SEARCH:
+#        kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi))
diff --git a/src/kpi_value_api/.gitlab-ci.yml b/src/kpi_value_api/.gitlab-ci.yml
index fef96d5b5bb6d4b33ea83419cb15051fdbf1d5ff..14c8df299b1a4970ec0a4733bcd918bf1485b00d 100644
--- a/src/kpi_value_api/.gitlab-ci.yml
+++ b/src/kpi_value_api/.gitlab-ci.yml
@@ -59,6 +59,7 @@ unit_test kpi-value-api:
     - docker pull "bitnami/kafka:latest"
     - >
       docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      --env ALLOW_ANONYMOUS_LOGIN=yes
       bitnami/zookeeper:latest
     - sleep 10 # Wait for Zookeeper to start
     - >
@@ -85,6 +86,8 @@ unit_test kpi-value-api:
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
     - docker rm -f $IMAGE_NAME
+    - docker rm -f kafka
+    - docker rm -f zookeeper
     - docker network rm teraflowbridge
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
diff --git a/src/kpi_value_writer/.gitlab-ci.yml b/src/kpi_value_writer/.gitlab-ci.yml
index 4b36165d02327332766197676d76da098a857045..3f376a6739ebf72964d889e0c43e04f8daed6069 100644
--- a/src/kpi_value_writer/.gitlab-ci.yml
+++ b/src/kpi_value_writer/.gitlab-ci.yml
@@ -59,6 +59,7 @@ unit_test kpi-value-writer:
     - docker pull "bitnami/kafka:latest"
     - >
       docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      --env ALLOW_ANONYMOUS_LOGIN=yes
       bitnami/zookeeper:latest
     - sleep 10 # Wait for Zookeeper to start
     - >
@@ -77,6 +78,8 @@ unit_test kpi-value-writer:
       $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
     - sleep 5
     - docker ps -a
+    - docker logs zookeeper
+    - docker logs kafka
     - docker logs $IMAGE_NAME
     - >
       docker exec -i $IMAGE_NAME bash -c
@@ -85,8 +88,8 @@ unit_test kpi-value-writer:
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
     - docker rm -f $IMAGE_NAME
-    - docker rm -f zookeeper
     - docker rm -f kafka
+    - docker rm -f zookeeper
     - docker network rm teraflowbridge
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_network/NameMapping.py b/src/nbi/service/rest_server/nbi_plugins/ietf_network/NameMapping.py
index 0c10559115f4e4ba9e5b2468e36cf7f917c25f51..94e4723a5c7ca83fb382bb70cb241cb69b66ce0e 100644
--- a/src/nbi/service/rest_server/nbi_plugins/ietf_network/NameMapping.py
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_network/NameMapping.py
@@ -19,7 +19,7 @@ class NameMappings:
     def __init__(self) -> None:
         self._device_uuid_to_name   : Dict[str,             str] = dict()
         self._endpoint_uuid_to_name : Dict[Tuple[str, str], str] = dict()
-    
+
     def store_device_name(self, device : Device) -> None:
         device_uuid = device.device_id.device_uuid.uuid
         device_name = device.name
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_network/Networks.py b/src/nbi/service/rest_server/nbi_plugins/ietf_network/Networks.py
index 5d663b8b3071856bc9cd204ee911c61b368ebe97..0198d418f7e5f0058ce40fbcd7b6010a168e1c51 100644
--- a/src/nbi/service/rest_server/nbi_plugins/ietf_network/Networks.py
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_network/Networks.py
@@ -12,19 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging
+import enum, json, logging
 import pyangbind.lib.pybindJSON as pybindJSON
 from flask import request
 from flask.json import jsonify
 from flask_restful import Resource
 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+from common.Settings import get_setting
+from common.proto.context_pb2 import ContextId, Empty
 from common.tools.context_queries.Topology import get_topology_details
+from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from nbi.service.rest_server.nbi_plugins.tools.Authentication import HTTP_AUTH
 from nbi.service.rest_server.nbi_plugins.tools.HttpStatusCodes import HTTP_OK, HTTP_SERVERERROR
 from .bindings import ietf_network
 from .ComposeNetwork import compose_network
 from .ManualFixes import manual_fixes
+from .YangHandler import YangHandler
 
 LOGGER = logging.getLogger(__name__)
 
@@ -33,6 +37,14 @@ TE_TOPOLOGY_NAMES = [
     'providerId-10-clientId-0-topologyId-2'
 ]
 
+class Renderer(enum.Enum):
+    LIBYANG   = 'LIBYANG'
+    PYANGBIND = 'PYANGBIND'
+
+DEFAULT_RENDERER = Renderer.LIBYANG
+USE_RENDERER = get_setting('IETF_NETWORK_RENDERER', default=DEFAULT_RENDERER.value)
+
+
 class Networks(Resource):
     @HTTP_AUTH.login_required
     def get(self):
@@ -40,31 +52,59 @@ class Networks(Resource):
         topology_id = ''
         try:
             context_client = ContextClient()
-            #target = get_slice_by_uuid(context_client, vpn_id, rw_copy=True)
-            #if target is None:
-            #    raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
 
-            ietf_nets = ietf_network()
+            if USE_RENDERER == Renderer.PYANGBIND.value:
+                #target = get_slice_by_uuid(context_client, vpn_id, rw_copy=True)
+                #if target is None:
+                #    raise Exception('VPN({:s}) not found in database'.format(str(vpn_id)))
+
+                ietf_nets = ietf_network()
+
+                topology_details = get_topology_details(
+                    context_client, DEFAULT_TOPOLOGY_NAME, context_uuid=DEFAULT_CONTEXT_NAME,
+                    #rw_copy=True
+                )
+                if topology_details is None:
+                    MSG = 'Topology({:s}/{:s}) not found'
+                    raise Exception(MSG.format(DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME))
 
-            topology_details = get_topology_details(
-                context_client, DEFAULT_TOPOLOGY_NAME, context_uuid=DEFAULT_CONTEXT_NAME, #rw_copy=True
-            )
-            if topology_details is None:
-                MSG = 'Topology({:s}/{:s}) not found'
-                raise Exception(MSG.format(DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME))
+                for te_topology_name in TE_TOPOLOGY_NAMES:
+                    ietf_net = ietf_nets.networks.network.add(te_topology_name)
+                    compose_network(ietf_net, te_topology_name, topology_details)
 
-            for te_topology_name in TE_TOPOLOGY_NAMES:
-                ietf_net = ietf_nets.networks.network.add(te_topology_name)
-                compose_network(ietf_net, te_topology_name, topology_details)
+                # TODO: improve these workarounds to enhance performance
+                json_response = json.loads(pybindJSON.dumps(ietf_nets, mode='ietf'))
+
+                # Workaround: pyangbind does not allow setting otn_topology / eth-tran-topology
+                manual_fixes(json_response)
+            elif USE_RENDERER == Renderer.LIBYANG.value:
+                yang_handler = YangHandler()
+                json_response = []
 
-            # TODO: improve these workarounds to enhance performance
-            json_response = json.loads(pybindJSON.dumps(ietf_nets, mode='ietf'))
-            
-            # Workaround; pyangbind does not allow to set otn_topology / eth-tran-topology
-            manual_fixes(json_response)
+                contexts = context_client.ListContexts(Empty()).contexts
+                context_names = [context.name for context in contexts]
+                LOGGER.info(f'Contexts detected: {context_names}')
+
+                for context_name in context_names:
+                    topologies = context_client.ListTopologies(ContextId(**json_context_id(context_name))).topologies
+                    topology_names = [topology.name for topology in topologies]
+                    LOGGER.info(f'Topologies detected for context {context_name}: {topology_names}')
+
+                    for topology_name in topology_names:
+                        topology_details = get_topology_details(context_client, topology_name, context_name)
+                        if topology_details is None:
+                            raise Exception(f'Topology({context_name}/{topology_name}) not found')
+
+                        network_reply = yang_handler.compose_network(topology_name, topology_details)
+                        json_response.append(network_reply)
+
+                yang_handler.destroy()
+            else:
+                raise Exception('Unsupported Renderer: {:s}'.format(str(USE_RENDERER)))
 
             response = jsonify(json_response)
             response.status_code = HTTP_OK
+
         except Exception as e: # pylint: disable=broad-except
             LOGGER.exception('Something went wrong Retrieving Topology({:s})'.format(str(topology_id)))
             response = jsonify({'error': str(e)})
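The renderer is chosen per deployment via an environment variable; note that `USE_RENDERER` is evaluated at import time, so the variable must be set before this module loads:

```python
import os
# Select the pyangbind renderer instead of the libyang default.
os.environ['IETF_NETWORK_RENDERER'] = 'PYANGBIND'
```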
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_network/YangHandler.py b/src/nbi/service/rest_server/nbi_plugins/ietf_network/YangHandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5dda280c98c060c2f872df5ab17152880b522d5
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_network/YangHandler.py
@@ -0,0 +1,117 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import libyang, logging, os
+from typing import Any
+from common.proto.context_pb2 import TopologyDetails, Device, Link
+from .NameMapping import NameMappings
+from context.client.ContextClient import ContextClient
+from common.tools.object_factory.Device import json_device_id
+from common.proto.context_pb2 import DeviceId
+
+LOGGER = logging.getLogger(__name__)
+
+YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang')
+YANG_MODULES = ['ietf-network', 'ietf-network-topology', 'ietf-l3-unicast-topology']
+
+class YangHandler:
+    def __init__(self) -> None:
+        self._yang_context = libyang.Context(YANG_DIR)
+        for yang_module_name in YANG_MODULES:
+            LOGGER.info('Loading module: {:s}'.format(str(yang_module_name)))
+            self._yang_context.load_module(yang_module_name).feature_enable_all()
+
+    def compose_network(self, te_topology_name: str, topology_details: TopologyDetails) -> dict:
+        networks = self._yang_context.create_data_path('/ietf-network:networks')
+        network = networks.create_path(f'network[network-id="{te_topology_name}"]')
+        network.create_path('network-id', te_topology_name)
+
+        network_types = network.create_path('network-types') 
+        network_types.create_path('ietf-l3-unicast-topology:l3-unicast-topology') 
+
+        name_mappings = NameMappings()
+
+        for device in topology_details.devices: 
+            self.compose_node(device, name_mappings, network)
+
+        for link in topology_details.links:
+            self.compose_link(link, name_mappings, network)
+
+        return json.loads(networks.print_mem('json'))
+
+    def compose_node(self, dev: Device, name_mappings: NameMappings, network: Any) -> None:                                     
+        device_name = dev.name
+        name_mappings.store_device_name(dev)
+
+        node = network.create_path(f'node[node-id="{device_name}"]')
+        node.create_path('node-id', device_name)
+        node_attributes = node.create_path('ietf-l3-unicast-topology:l3-node-attributes')
+        node_attributes.create_path('name', device_name)
+
+        context_client = ContextClient()
+        device = context_client.GetDevice(DeviceId(**json_device_id(device_name)))
+
+        for endpoint in device.device_endpoints:
+            name_mappings.store_endpoint_name(dev, endpoint)
+
+        self._process_device_config(device, node)
+
+    def _process_device_config(self, device: Device, node: Any) -> None:
+        for config in device.device_config.config_rules:
+            if config.WhichOneof('config_rule') != 'custom' or '/interface[' not in config.custom.resource_key:
+                continue
+
+            for endpoint in device.device_endpoints:
+                endpoint_name = endpoint.name
+                if f'/interface[{endpoint_name}]' in config.custom.resource_key or f'/interface[{endpoint_name}.' in config.custom.resource_key:
+                    interface_name = config.custom.resource_key.split('interface[')[1].split(']')[0]
+                    self._create_termination_point(node, interface_name, endpoint_name, config.custom.resource_value)
+
+    def _create_termination_point(self, node: Any, interface_name: str, endpoint_name: str, resource_value: str) -> None:
+        ip_addresses = self._extract_ip_addresses(json.loads(resource_value))
+        if ip_addresses:
+            tp = node.create_path(f'ietf-network-topology:termination-point[tp-id="{interface_name}"]')
+            tp.create_path('tp-id', interface_name)
+            tp_attributes = tp.create_path('ietf-l3-unicast-topology:l3-termination-point-attributes')
+
+            for ip in ip_addresses:
+                tp_attributes.create_path('ip-address', ip)
+            tp_attributes.create_path('interface-name', endpoint_name)
+
+    @staticmethod
+    def _extract_ip_addresses(resource_value: dict) -> list:
+        ip_addresses = []
+        if 'address_ip' in resource_value:
+            ip_addresses.append(resource_value['address_ip'])
+        if 'address_ipv6' in resource_value:
+            ip_addresses.append(resource_value['address_ipv6'])
+        return ip_addresses
+
+    def compose_link(self, link_specs: Link, name_mappings: NameMappings, network: Any) -> None:
+        link_name = link_specs.name
+        links = network.create_path(f'ietf-network-topology:link[link-id="{link_name}"]')
+        links.create_path('link-id', link_name)
+
+        self._create_link_endpoint(links, 'source', link_specs.link_endpoint_ids[0], name_mappings)
+        self._create_link_endpoint(links, 'destination', link_specs.link_endpoint_ids[-1], name_mappings)
+
+    def _create_link_endpoint(self, links: Any, endpoint_type: str, endpoint_id: Any, name_mappings: NameMappings) -> None:
+        endpoint = links.create_path(endpoint_type)
+        if endpoint_type == 'destination': endpoint_type = 'dest'
+        endpoint.create_path(f'{endpoint_type}-node', name_mappings.get_device_name(endpoint_id.device_id))
+        endpoint.create_path(f'{endpoint_type}-tp', name_mappings.get_endpoint_name(endpoint_id))
+
+    def destroy(self) -> None:
+        self._yang_context.destroy()
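A usage sketch outside Flask (topology and context names illustrative); wrapping `destroy()` in `finally` is advisable, since libyang contexts hold native resources and the request handler above only destroys on the success path:

```python
from common.tools.context_queries.Topology import get_topology_details
from context.client.ContextClient import ContextClient
from nbi.service.rest_server.nbi_plugins.ietf_network.YangHandler import YangHandler

context_client = ContextClient()
topology_details = get_topology_details(context_client, 'admin', context_uuid='admin')

yang_handler = YangHandler()
try:
    reply = yang_handler.compose_network('admin', topology_details)
finally:
    yang_handler.destroy()
```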
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_network/yang/ietf-l3-unicast-topology@2018-02-26.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_network/yang/ietf-l3-unicast-topology@2018-02-26.yang
new file mode 100644
index 0000000000000000000000000000000000000000..39fcebd767bf7ea687de523b0dd0ba731d3c80e7
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_network/yang/ietf-l3-unicast-topology@2018-02-26.yang
@@ -0,0 +1,359 @@
+module ietf-l3-unicast-topology {
+  yang-version 1.1;
+  namespace
+    "urn:ietf:params:xml:ns:yang:ietf-l3-unicast-topology";
+  prefix "l3t";
+  import ietf-network {
+    prefix "nw";
+  }
+  import ietf-network-topology {
+    prefix "nt";
+  }
+  import ietf-inet-types {
+    prefix "inet";
+  }
+  import ietf-routing-types {
+    prefix "rt-types";
+  }
+  organization
+    "IETF I2RS (Interface to the Routing System) Working Group";
+  contact
+    "WG Web:    <https://datatracker.ietf.org/wg/i2rs/>
+     WG List:   <mailto:i2rs@ietf.org>
+     Editor:    Alexander Clemm
+                <mailto:ludwig@clemm.org>
+     Editor:    Jan Medved
+                <mailto:jmedved@cisco.com>
+     Editor:    Robert Varga
+                <mailto:robert.varga@pantheon.tech>
+     Editor:    Xufeng Liu
+                <mailto:xufeng.liu.ietf@gmail.com>
+     Editor:    Nitin Bahadur
+                <mailto:nitin_bahadur@yahoo.com>
+     Editor:    Hariharan Ananthakrishnan
+                <mailto:hari@packetdesign.com>";
+  description
+    "This module defines a model for Layer 3 Unicast
+     topologies.
+
+     Copyright (c) 2018 IETF Trust and the persons identified as
+     authors of the code.  All rights reserved.
+
+     Redistribution and use in source and binary forms, with or
+     without modification, is permitted pursuant to, and subject
+     to the license terms contained in, the Simplified BSD License
+     set forth in Section 4.c of the IETF Trust's Legal Provisions
+     Relating to IETF Documents
+     (https://trustee.ietf.org/license-info).
+
+     This version of this YANG module is part of
+     RFC 8346; see the RFC itself for full legal notices.";
+  revision "2018-02-26" {
+    description
+      "Initial revision.";
+    reference
+      "RFC 8346: A YANG Data Model for Layer 3 Topologies";
+  }
+
+  identity flag-identity {
+    description "Base type for flags";
+  }
+
+  typedef l3-event-type {
+    type enumeration {
+      enum "add" {
+        description
+          "A Layer 3 node, link, prefix, or termination point has
+          been added";
+      }
+      enum "remove" {
+        description
+          "A Layer 3 node, link, prefix, or termination point has
+          been removed";
+      }
+      enum "update" {
+        description
+          "A Layer 3 node, link, prefix, or termination point has
+          been updated";
+      }
+    }
+    description "Layer 3 event type for notifications";
+  }
+
+  typedef prefix-flag-type {
+    type identityref {
+      base "flag-identity";
+    }
+    description "Prefix flag attributes";
+  }
+
+  typedef node-flag-type {
+    type identityref {
+      base "flag-identity";
+    }
+    description "Node flag attributes";
+  }
+
+  typedef link-flag-type {
+    type identityref {
+      base "flag-identity";
+    }
+    description "Link flag attributes";
+  }
+
+  typedef l3-flag-type {
+    type identityref {
+      base "flag-identity";
+    }
+    description "L3 flag attributes";
+  }
+
+  grouping l3-prefix-attributes {
+    description
+      "L3 prefix attributes";
+    leaf prefix {
+      type inet:ip-prefix;
+      description
+        "IP prefix value";
+    }
+    leaf metric {
+      type uint32;
+      description
+        "Prefix metric";
+    }
+    leaf-list flag {
+      type prefix-flag-type;
+      description
+        "Prefix flags";
+    }
+  }
+  grouping l3-unicast-topology-type {
+    description "Identifies the topology type to be L3 Unicast.";
+    container l3-unicast-topology {
+      presence "indicates L3 Unicast topology";
+      description
+        "The presence of the container node indicates L3 Unicast
+        topology";
+    }
+  }
+  grouping l3-topology-attributes {
+    description "Topology scope attributes";
+    container l3-topology-attributes {
+      description "Contains topology attributes";
+      leaf name {
+        type string;
+        description
+          "Name of the topology";
+      }
+      leaf-list flag {
+        type l3-flag-type;
+        description
+          "Topology flags";
+      }
+    }
+  }
+  grouping l3-node-attributes {
+    description "L3 node scope attributes";
+    container l3-node-attributes {
+      description
+        "Contains node attributes";
+      leaf name {
+        type inet:domain-name;
+        description
+          "Node name";
+      }
+      leaf-list flag {
+        type node-flag-type;
+        description
+          "Node flags";
+      }
+      leaf-list router-id {
+        type rt-types:router-id;
+        description
+          "Router-id for the node";
+      }
+      list prefix {
+        key "prefix";
+        description
+          "A list of prefixes along with their attributes";
+        uses l3-prefix-attributes;
+      }
+    }
+  }
+  grouping l3-link-attributes {
+    description
+      "L3 link scope attributes";
+    container l3-link-attributes {
+      description
+        "Contains link attributes";
+      leaf name {
+        type string;
+        description
+          "Link Name";
+      }
+      leaf-list flag {
+        type link-flag-type;
+        description
+          "Link flags";
+      }
+      leaf metric1 {
+        type uint64;
+        description
+            "Link Metric 1";
+      }
+      leaf metric2 {
+        type uint64;
+        description
+            "Link Metric 2";
+      }
+    }
+  }
+  grouping l3-termination-point-attributes {
+    description "L3 termination point scope attributes";
+    container l3-termination-point-attributes {
+      description
+        "Contains termination point attributes";
+      choice termination-point-type {
+        description
+          "Indicates the termination point type";
+        case ip {
+          leaf-list ip-address {
+            type inet:ip-address;
+            description
+              "IPv4 or IPv6 address.";
+          }
+        }
+        case unnumbered {
+          leaf unnumbered-id {
+            type uint32;
+            description
+              "Unnumbered interface identifier.
+               The identifier will correspond to the ifIndex value
+               of the interface, i.e., the ifIndex value of the
+               ifEntry that represents the interface in
+               implementations where the Interfaces Group MIB
+               (RFC 2863) is supported.";
+            reference
+              "RFC 2863: The Interfaces Group MIB";
+          }
+        }
+        case interface-name {
+          leaf interface-name {
+            type string;
+            description
+              "Name of the interface.  The name can (but does not
+               have to) correspond to an interface reference of a
+               containing node's interface, i.e., the path name of a
+               corresponding interface data node on the containing
+               node reminiscent of data type interface-ref defined
+               in RFC 8343. It should be noted that data type
+               interface-ref of RFC 8343 cannot be used directly,
+
+               as this data type is used to reference an interface
+               in a datastore of a single node in the network, not
+               to uniquely reference interfaces across a network.";
+            reference
+              "RFC 8343: A YANG Data Model for Interface Management";
+          }
+        }
+      }
+    }
+  }
+  augment "/nw:networks/nw:network/nw:network-types" {
+    description
+      "Introduces new network type for L3 Unicast topology";
+    uses l3-unicast-topology-type;
+  }
+  augment "/nw:networks/nw:network" {
+    when "nw:network-types/l3t:l3-unicast-topology" {
+      description
+        "Augmentation parameters apply only for networks with
+        L3 Unicast topology";
+    }
+    description
+        "L3 Unicast for the network as a whole";
+    uses l3-topology-attributes;
+  }
+  augment "/nw:networks/nw:network/nw:node" {
+    when "../nw:network-types/l3t:l3-unicast-topology" {
+      description
+        "Augmentation parameters apply only for networks with
+        L3 Unicast topology";
+    }
+    description
+        "L3 Unicast node-level attributes ";
+    uses l3-node-attributes;
+  }
+  augment "/nw:networks/nw:network/nt:link" {
+    when "../nw:network-types/l3t:l3-unicast-topology" {
+      description
+        "Augmentation parameters apply only for networks with
+        L3 Unicast topology";
+    }
+    description
+      "Augments topology link attributes";
+    uses l3-link-attributes;
+  }
+  augment "/nw:networks/nw:network/nw:node/"
+         +"nt:termination-point" {
+    when "../../nw:network-types/l3t:l3-unicast-topology" {
+      description
+        "Augmentation parameters apply only for networks with
+        L3 Unicast topology";
+    }
+    description "Augments topology termination point configuration";
+    uses l3-termination-point-attributes;
+  }
+  notification l3-node-event {
+    description
+      "Notification event for L3 node";
+    leaf l3-event-type {
+      type l3-event-type;
+      description
+        "Event type";
+    }
+    uses nw:node-ref;
+    uses l3-unicast-topology-type;
+    uses l3-node-attributes;
+  }
+  notification l3-link-event {
+    description
+      "Notification event for L3 link";
+    leaf l3-event-type {
+      type l3-event-type;
+      description
+        "Event type";
+    }
+    uses nt:link-ref;
+    uses l3-unicast-topology-type;
+    uses l3-link-attributes;
+  }
+  notification l3-prefix-event {
+    description
+      "Notification event for L3 prefix";
+    leaf l3-event-type {
+      type l3-event-type;
+      description
+        "Event type";
+    }
+    uses nw:node-ref;
+    uses l3-unicast-topology-type;
+    container prefix {
+      description
+        "Contains L3 prefix attributes";
+      uses l3-prefix-attributes;
+    }
+  }
+  notification termination-point-event {
+    description
+      "Notification event for L3 termination point";
+    leaf l3-event-type {
+      type l3-event-type;
+      description
+        "Event type";
+    }
+    uses nt:tp-ref;
+    uses l3-unicast-topology-type;
+    uses l3-termination-point-attributes;
+  }
+}
diff --git a/src/nbi/tests/test_ietf_network.py b/src/nbi/tests/test_ietf_network.py
index 9a25e1b3b5e0ee202a0af945e88794f8aa9b0ec4..ec03d3798ded3efd027a0b8237becc865441fc98 100644
--- a/src/nbi/tests/test_ietf_network.py
+++ b/src/nbi/tests/test_ietf_network.py
@@ -12,14 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import deepdiff, json, logging, operator
+import deepdiff, json, logging, operator, os
 from typing import Dict
 from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import ContextId
-from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
+from common.tools.descriptor.Loader import (
+    DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
+)
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from nbi.service.rest_server import RestServer
+
+# Explicitly tell the NBI to use the PyangBind renderer for this test
+os.environ['IETF_NETWORK_RENDERER'] = 'PYANGBIND'
+
 from .PrepareTestScenario import ( # pylint: disable=unused-import
     # be careful, order of symbols is important here!
     do_rest_get_request, mock_service, nbi_service_rest, osm_wim, context_client
diff --git a/src/pathcomp/frontend/Config.py b/src/pathcomp/frontend/Config.py
index 08de81b47dd05ce19ac8335b5b31df8ef4ee461e..ab431acb92ac3732ff98bf7228d825c92d279986 100644
--- a/src/pathcomp/frontend/Config.py
+++ b/src/pathcomp/frontend/Config.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import os
-from common.Settings import get_setting
+from common.Settings import get_setting, is_deployed_forecaster
 
 DEFAULT_PATHCOMP_BACKEND_SCHEME  = 'http'
 DEFAULT_PATHCOMP_BACKEND_HOST    = '127.0.0.1'
@@ -44,6 +44,7 @@ SETTING_NAME_ENABLE_FORECASTER = 'ENABLE_FORECASTER'
 TRUE_VALUES = {'Y', 'YES', 'TRUE', 'T', 'E', 'ENABLE', 'ENABLED'}
 
 def is_forecaster_enabled() -> bool:
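+    # The forecaster can only be enabled when the Forecaster component is deployed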
+    if not is_deployed_forecaster(): return False
     is_enabled = get_setting(SETTING_NAME_ENABLE_FORECASTER, default=None)
     if is_enabled is None: return False
     str_is_enabled = str(is_enabled).upper()
diff --git a/src/qkd_app/.gitlab-ci.yml b/src/qkd_app/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..85ee2b5e04bce3077086d0cc831043483a995aef
--- /dev/null
+++ b/src/qkd_app/.gitlab-ci.yml
@@ -0,0 +1,80 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+build app:
+  variables:
+    IMAGE_NAME: 'qkd_app' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
+    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit_test app:
+  variables:
+    IMAGE_NAME: 'qkd_app' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build app
+    - unit_test service
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker run --name $IMAGE_NAME -d -p 10070:10070 -p 8005:8005 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - sleep 5
+    - docker ps -a
+    - docker logs $IMAGE_NAME
+    - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
+    - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
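+  # the regex above captures the overall percentage from the 'coverage report' output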
+  after_script:
+    - docker rm -f $IMAGE_NAME
+    - docker network rm teraflowbridge
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - src/$IMAGE_NAME/tests/Dockerfile
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
diff --git a/src/qkd_app/Config.py b/src/qkd_app/Config.py
new file mode 100644
index 0000000000000000000000000000000000000000..07d08814021ef82220611ee21c01ba01806682e9
--- /dev/null
+++ b/src/qkd_app/Config.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/qkd_app/Dockerfile b/src/qkd_app/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..4d2b8d61bd61ca98d62316b021d2486de3777977
--- /dev/null
+++ b/src/qkd_app/Dockerfile
@@ -0,0 +1,70 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
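+# The sed above rewrites the generated absolute imports (import x_pb2) into relative ones (from . import x_pb2)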
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/qkd_app
+WORKDIR /var/teraflow/qkd_app
+COPY src/qkd_app/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/context/. context/
+COPY src/service/. service/
+COPY src/qkd_app/. qkd_app/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "qkd_app.service"]
diff --git a/src/qkd_app/__init__.py b/src/qkd_app/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..07d08814021ef82220611ee21c01ba01806682e9
--- /dev/null
+++ b/src/qkd_app/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/qkd_app/client/QKDAppClient.py b/src/qkd_app/client/QKDAppClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a174df6adc69ab9ce88b0d8878c92b9b9e7820e
--- /dev/null
+++ b/src/qkd_app/client/QKDAppClient.py
@@ -0,0 +1,64 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+from common.proto.context_pb2 import Empty, ContextId
+from common.proto.qkd_app_pb2 import App, AppId, AppList
+from common.proto.qkd_app_pb2_grpc import AppServiceStub
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 15
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
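+# Failed calls are retried up to MAX_RETRIES times with exponential backoff; connect() is
+# re-invoked between attempts (prepare_method_name) to re-establish the channel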
+
+class QKDAppClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.QKD_APP)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.QKD_APP)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = AppServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def RegisterApp(self, request : App) -> Empty:
+        LOGGER.debug('RegisterApp request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RegisterApp(request)
+        LOGGER.debug('RegisterApp result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def ListApps(self, request: ContextId) -> AppList:
+        LOGGER.debug('ListApps request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.ListApps(request)
+        LOGGER.debug('ListApps result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
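+
+# Minimal usage sketch (illustrative; host/port values are hypothetical, though the CI
+# setup exposes the gRPC service on port 10070):
+#   client = QKDAppClient(host='127.0.0.1', port=10070)
+#   apps = client.ListApps(ContextId())
+#   client.close()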
diff --git a/src/qkd_app/client/__init__.py b/src/qkd_app/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..07d08814021ef82220611ee21c01ba01806682e9
--- /dev/null
+++ b/src/qkd_app/client/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/qkd_app/requirements.in b/src/qkd_app/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..a9bce93b569792a75687811f08e03d398ae4aeb5
--- /dev/null
+++ b/src/qkd_app/requirements.in
@@ -0,0 +1,25 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+Flask==2.1.3
+Flask-HTTPAuth==4.5.0
+Flask-RESTful==0.3.9
+jsonschema==4.4.0
+requests==2.27.1
+werkzeug==2.3.7
+nats-py==2.6.*
+psycopg2-binary==2.9.*
+SQLAlchemy==1.4.*
+sqlalchemy-cockroachdb==1.4.*
+SQLAlchemy-Utils==0.38.*
diff --git a/src/qkd_app/service/QKDAppService.py b/src/qkd_app/service/QKDAppService.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6c93cd811a72594804fe8e8e86a9586533a1317
--- /dev/null
+++ b/src/qkd_app/service/QKDAppService.py
@@ -0,0 +1,37 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, sqlalchemy
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.message_broker.MessageBroker import MessageBroker
+from common.proto.qkd_app_pb2_grpc import add_AppServiceServicer_to_server
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from qkd_app.service.QKDAppServiceServicerImpl import AppServiceServicerImpl
+
+# Custom gRPC settings
+GRPC_MAX_WORKERS = 200 # multiple clients might keep connections alive for Get*Events() RPC methods
+LOGGER = logging.getLogger(__name__)
+
+
+class AppService(GenericGrpcService):
+    def __init__(
+        self, db_engine : sqlalchemy.engine.Engine, messagebroker : MessageBroker, cls_name: str = __name__
+    ) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.QKD_APP)
+        super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name)
+        self.app_servicer = AppServiceServicerImpl(db_engine, messagebroker)
+
+    def install_servicers(self):
+        add_AppServiceServicer_to_server(self.app_servicer, self.server)
diff --git a/src/qkd_app/service/QKDAppServiceServicerImpl.py b/src/qkd_app/service/QKDAppServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..df7a885c47eda9d7a6137c9905388da49c698e7e
--- /dev/null
+++ b/src/qkd_app/service/QKDAppServiceServicerImpl.py
@@ -0,0 +1,73 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging, sqlalchemy
+#from typing import Iterator, Optional
+from common.message_broker.MessageBroker import MessageBroker
+from common.proto.context_pb2 import (
+    Empty, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, ContextId)
+from common.proto.qkd_app_pb2 import (App, AppId, AppList, QKDAppTypesEnum)
+from common.proto.qkd_app_pb2_grpc import AppServiceServicer
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+#from common.tools.context_queries.InterDomain import is_inter_domain #, is_multi_domain
+#from common.tools.grpc.ConfigRules import copy_config_rules
+#from common.tools.grpc.Constraints import copy_constraints
+#from common.tools.grpc.EndPointIds import copy_endpoint_ids
+#from common.tools.grpc.ServiceIds import update_service_ids
+#from common.tools.grpc.Tools import grpc_message_to_json_string
+#from context.client.ContextClient import ContextClient
+#from qkd_app.client.QKDAppClient import QKDAppClient
+from .database.QKDApp import app_set, app_list_objs, app_get, app_get_by_server
+from common.method_wrappers.ServiceExceptions import NotFoundException
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('QkdApp', 'RPC')
+
+# Optare: this file must be edited according to the app's logic
+
+class AppServiceServicerImpl(AppServiceServicer):
+    def __init__(self, db_engine : sqlalchemy.engine.Engine, messagebroker : MessageBroker):
+        LOGGER.debug('Creating Servicer...')
+        self.db_engine = db_engine
+        self.messagebroker = messagebroker
+        LOGGER.debug('Servicer Created')
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def RegisterApp(self, request : App, context : grpc.ServicerContext) -> Empty:
+        # Optare: This is the main function required for the project.
+        # Optare: Internal apps are saved directly. External apps are saved as pending
+        # (no remote device yet) until the other party registers the app too.
+        # Optare: Ideally, only the code inside the try block needs to change. It currently
+        # searches for a pending app with the same server_app_id, but more restrictions or a
+        # different search can be applied, raising NotFoundException when nothing matches.
+
+        if request.app_type == QKDAppTypesEnum.QKDAPPTYPES_INTERNAL:
+            app_set(self.db_engine, self.messagebroker, request)
+
+        else:
+            try:
+                app = app_get_by_server(self.db_engine, request.server_app_id)
+            except NotFoundException:
+                app = request
+                app_set(self.db_engine, self.messagebroker, app)
+            else:
+                app.remote_device_id.device_uuid.uuid = request.local_device_id.device_uuid.uuid
+                app_set(self.db_engine, self.messagebroker, app)
+
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListApps(self, request: ContextId, context : grpc.ServicerContext) -> AppList:
+        return app_list_objs(self.db_engine)
diff --git a/src/qkd_app/service/__init__.py b/src/qkd_app/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..07d08814021ef82220611ee21c01ba01806682e9
--- /dev/null
+++ b/src/qkd_app/service/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/qkd_app/service/__main__.py b/src/qkd_app/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed7e554728eb2de6240dd4facb7f084337a026a4
--- /dev/null
+++ b/src/qkd_app/service/__main__.py
@@ -0,0 +1,94 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from prometheus_client import start_http_server
+#from common.Constants import ServiceNameEnum
+from common.Settings import (
+    #ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name,
+    get_log_level, get_metrics_port, wait_for_environment_variables)
+from qkd_app.service.QKDAppService import AppService
+from qkd_app.service.rest_server.RestServer import RestServer
+from qkd_app.service.rest_server.qkd_app import register_qkd_app
+#from common.message_broker.Factory import get_messagebroker_backend
+#from common.message_broker.MessageBroker import MessageBroker
+from qkd_app.service.database.Engine import Engine
+from qkd_app.service.database.models._Base import rebuild_database
+
+terminate = threading.Event()
+LOGGER : logging.Logger = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+    LOGGER = logging.getLogger(__name__)
+
+    wait_for_environment_variables([
+        #get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
+        #get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+    ])
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
+
+    # Get Database Engine instance and initialize database, if needed
+    LOGGER.info('Getting SQLAlchemy DB Engine...')
+    db_engine = Engine.get_engine()
+    if db_engine is None:
+        LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+        return -1
+
+    try:
+        Engine.create_database(db_engine)
+    except: # pylint: disable=bare-except # pragma: no cover
+        LOGGER.exception('Failed to check/create the database: {:s}'.format(str(db_engine.url)))
+
+    rebuild_database(db_engine)
+
+    # Get message broker instance
+    messagebroker = None #MessageBroker(get_messagebroker_backend())
+
+    # Starting context service
+    grpc_service = AppService(db_engine, messagebroker)
+    grpc_service.start()
+
+    rest_server = RestServer()
+    register_qkd_app(rest_server)
+    rest_server.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.info('Terminating...')
+    grpc_service.stop()
+    rest_server.shutdown()
+    rest_server.join()
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/kpi_manager/database/KpiEngine.py b/src/qkd_app/service/database/Engine.py
similarity index 67%
rename from src/kpi_manager/database/KpiEngine.py
rename to src/qkd_app/service/database/Engine.py
index 0fce7e3d36cf2f03a18f311c815719a4f17b2869..8f528f9a1b3cacca2ea260901ab808461dd3183d 100644
--- a/src/kpi_manager/database/KpiEngine.py
+++ b/src/qkd_app/service/database/Engine.py
@@ -12,29 +12,44 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, sqlalchemy
+import logging, sqlalchemy, sqlalchemy_utils
 from common.Settings import get_setting
 
 LOGGER = logging.getLogger(__name__)
+
+APP_NAME = 'tfs'
+ECHO = False # true: dump SQL commands and transactions executed
 CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
 
-class KpiEngine:
+class Engine:
     @staticmethod
     def get_engine() -> sqlalchemy.engine.Engine:
         crdb_uri = get_setting('CRDB_URI', default=None)
         if crdb_uri is None:
             CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE')
             CRDB_SQL_PORT  = get_setting('CRDB_SQL_PORT')
-            CRDB_DATABASE  = 'tfs_kpi_mgmt'             # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT')
+            CRDB_DATABASE  = get_setting('CRDB_DATABASE_APP')
             CRDB_USERNAME  = get_setting('CRDB_USERNAME')
             CRDB_PASSWORD  = get_setting('CRDB_PASSWORD')
             CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
             crdb_uri = CRDB_URI_TEMPLATE.format(
                 CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+
         try:
-            engine = sqlalchemy.create_engine(crdb_uri, echo=False)
-            LOGGER.info(' KpiDBmanager initalized with DB URL: {:}'.format(crdb_uri))
+            engine = sqlalchemy.create_engine(
+                crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True)
         except: # pylint: disable=bare-except # pragma: no cover
             LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
-            return None # type: ignore
+            return None
+
         return engine
+
+    @staticmethod
+    def create_database(engine : sqlalchemy.engine.Engine) -> None:
+        if not sqlalchemy_utils.database_exists(engine.url):
+            sqlalchemy_utils.create_database(engine.url)
+
+    @staticmethod
+    def drop_database(engine : sqlalchemy.engine.Engine) -> None:
+        if sqlalchemy_utils.database_exists(engine.url):
+            sqlalchemy_utils.drop_database(engine.url)
diff --git a/src/qkd_app/service/database/QKDApp.py b/src/qkd_app/service/database/QKDApp.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1fb90d4efcd0770bcc4c48c1f00deb0e95687ad
--- /dev/null
+++ b/src/qkd_app/service/database/QKDApp.py
@@ -0,0 +1,185 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime, logging, uuid
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.engine import Engine
+from sqlalchemy.orm import Session, selectinload, sessionmaker
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, List, Optional, Set, Tuple
+from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException
+from common.message_broker.MessageBroker import MessageBroker
+from common.proto.context_pb2 import Empty
+from common.proto.qkd_app_pb2 import (
+    AppList, App, AppId)
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from .models.QKDAppModel import AppModel
+from .models.enums.QKDAppStatus import grpc_to_enum__qkd_app_status
+from .models.enums.QKDAppTypes import grpc_to_enum__qkd_app_types
+from .uuids.QKDApp import app_get_uuid
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.QKDApp import json_app_id
+from context.service.database.uuids.Context import context_get_uuid
+
+#from .Events import notify_event_context, notify_event_device, notify_event_topology
+
+LOGGER = logging.getLogger(__name__)
+
+
+def app_list_objs(db_engine : Engine) -> AppList:
+    def callback(session : Session) -> List[Dict]:
+        obj_list : List[AppModel] = session.query(AppModel)\
+            .all()
+        return [obj.dump() for obj in obj_list]
+    apps = run_transaction(sessionmaker(bind=db_engine), callback)
+    return AppList(apps=apps)
+
+def app_get(db_engine : Engine, request : AppId) -> App:
+    app_uuid = app_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> Optional[Dict]:
+        obj : Optional[AppModel] = session.query(AppModel)\
+            .filter_by(app_uuid=app_uuid).one_or_none()
+        return None if obj is None else obj.dump()
+    obj = run_transaction(sessionmaker(bind=db_engine), callback)
+    if obj is None:
+        raw_app_uuid = request.app_uuid.uuid
+        raise NotFoundException('App', raw_app_uuid, extra_details=[
+            'app_uuid generated was: {:s}'.format(app_uuid)
+        ])
+    return App(**obj)
+
+def app_set(db_engine : Engine, messagebroker : MessageBroker, request : App) -> AppId:
+    context_uuid = context_get_uuid(request.app_id.context_id, allow_random=False)
+    raw_app_uuid = request.app_id.app_uuid.uuid
+    app_uuid = app_get_uuid(request.app_id, allow_random=True)
+
+    app_status = grpc_to_enum__qkd_app_status(request.app_status)
+    app_type = grpc_to_enum__qkd_app_types(request.app_type)
+
+    now = datetime.datetime.utcnow()
+
+    app_data = [{
+        'context_uuid'       : context_uuid,
+        'app_uuid'           : app_uuid,
+        'app_status'         : app_status,
+        'app_type'           : app_type,
+        'server_app_id'      : request.server_app_id,
+        'client_app_id'      : request.client_app_id,
+        'backing_qkdl_uuid'  : [qkdl_id.qkdl_uuid.uuid for qkdl_id in request.backing_qkdl_id],
+        'local_device_uuid'  : request.local_device_id.device_uuid.uuid,
+        'remote_device_uuid' : request.remote_device_id.device_uuid.uuid or None,
+        'created_at'         : now,
+        'updated_at'         : now,
+    }]
+
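+    # Upsert: insert the app or, on app_uuid conflict, update the existing row; comparing
+    # the created_at/updated_at values returned by the statement distinguishes an update
+    # from a fresh insert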
+    def callback(session : Session) -> bool:
+        stmt = insert(AppModel).values(app_data)
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[AppModel.app_uuid],
+            set_=dict(
+                app_status         = stmt.excluded.app_status,
+                app_type           = stmt.excluded.app_type,
+                server_app_id      = stmt.excluded.server_app_id,
+                client_app_id      = stmt.excluded.client_app_id,
+                backing_qkdl_uuid  = stmt.excluded.backing_qkdl_uuid,
+                local_device_uuid  = stmt.excluded.local_device_uuid,
+                remote_device_uuid = stmt.excluded.remote_device_uuid,
+                updated_at         = stmt.excluded.updated_at,
+            )
+        )
+        stmt = stmt.returning(AppModel.created_at, AppModel.updated_at)
+        created_at, updated_at = session.execute(stmt).fetchone()
+        updated = updated_at > created_at
+
+        return updated
+
+    updated = run_transaction(sessionmaker(bind=db_engine), callback)
+    context_id = json_context_id(context_uuid)
+    app_id = json_app_id(app_uuid, context_id=context_id)
+    #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+    #notify_event_app(messagebroker, event_type, app_id)
+    #notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+    return AppId(**app_id)
+
+
+def app_get_by_server(db_engine : Engine, request : str) -> App:
+    def callback(session : Session) -> Optional[Dict]:
+        obj : Optional[AppModel] = session.query(AppModel)\
+            .filter_by(server_app_id=request).one_or_none()
+        return None if obj is None else obj.dump()
+    obj = run_transaction(sessionmaker(bind=db_engine), callback)
+    if obj is None:
+        raise NotFoundException('App', request, extra_details=[
+            'no pending app matches the given server_app_id'
+        ])
+    return App(**obj)
+
+
+"""
+def device_delete(db_engine : Engine, messagebroker : MessageBroker, request : DeviceId) -> Empty:
+    device_uuid = device_get_uuid(request, allow_random=False)
+    def callback(session : Session) -> Tuple[bool, List[Dict]]:
+        query = session.query(TopologyDeviceModel)
+        query = query.filter_by(device_uuid=device_uuid)
+        topology_device_list : List[TopologyDeviceModel] = query.all()
+        topology_ids = [obj.topology.dump_id() for obj in topology_device_list]
+        num_deleted = session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete()
+        return num_deleted > 0, topology_ids
+    deleted, updated_topology_ids = run_transaction(sessionmaker(bind=db_engine), callback)
+    device_id = json_device_id(device_uuid)
+    if deleted:
+        notify_event_device(messagebroker, EventTypeEnum.EVENTTYPE_REMOVE, device_id)
+
+        context_ids  : Dict[str, Dict] = dict()
+        topology_ids : Dict[str, Dict] = dict()
+        for topology_id in updated_topology_ids:
+            topology_uuid = topology_id['topology_uuid']['uuid']
+            topology_ids[topology_uuid] = topology_id
+            context_id = topology_id['context_id']
+            context_uuid = context_id['context_uuid']['uuid']
+            context_ids[context_uuid] = context_id
+
+        for topology_id in topology_ids.values():
+            notify_event_topology(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, topology_id)
+
+        for context_id in context_ids.values():
+            notify_event_context(messagebroker, EventTypeEnum.EVENTTYPE_UPDATE, context_id)
+
+    return Empty()
+
+def device_select(db_engine : Engine, request : DeviceFilter) -> DeviceList:
+    device_uuids = [
+        device_get_uuid(device_id, allow_random=False)
+        for device_id in request.device_ids.device_ids
+    ]
+    dump_params = dict(
+        include_endpoints   =request.include_endpoints,
+        include_config_rules=request.include_config_rules,
+        include_components  =request.include_components,
+    )
+    def callback(session : Session) -> List[Dict]:
+        query = session.query(DeviceModel)
+        if request.include_endpoints   : query = query.options(selectinload(DeviceModel.endpoints))
+        if request.include_config_rules: query = query.options(selectinload(DeviceModel.config_rules))
+        #if request.include_components  : query = query.options(selectinload(DeviceModel.components))
+        obj_list : List[DeviceModel] = query.filter(DeviceModel.device_uuid.in_(device_uuids)).all()
+        return [obj.dump(**dump_params) for obj in obj_list]
+    devices = run_transaction(sessionmaker(bind=db_engine), callback)
+    return DeviceList(devices=devices)
+"""
diff --git a/src/qkd_app/service/database/__init__.py b/src/qkd_app/service/database/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..07d08814021ef82220611ee21c01ba01806682e9
--- /dev/null
+++ b/src/qkd_app/service/database/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/qkd_app/service/database/models/QKDAppModel.py b/src/qkd_app/service/database/models/QKDAppModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..c32b4e28c95105d8659cb52790f51b330764c2cf
--- /dev/null
+++ b/src/qkd_app/service/database/models/QKDAppModel.py
@@ -0,0 +1,63 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import operator
+from sqlalchemy import CheckConstraint, Column, DateTime, Float, Enum, ForeignKey, Integer, String
+from sqlalchemy.dialects.postgresql import UUID, ARRAY
+from sqlalchemy.orm import relationship
+from typing import Dict
+from ._Base import _Base
+from .enums.QKDAppStatus import ORM_QKDAppStatusEnum
+from .enums.QKDAppTypes import ORM_QKDAppTypesEnum
+
+class AppModel(_Base):
+    __tablename__ = 'qkd_app'
+
+    app_uuid            = Column(UUID(as_uuid=False), primary_key=True)
+    context_uuid        = Column(UUID(as_uuid=False), nullable=False) # Supposed to be Foreign Key
+    app_status          = Column(Enum(ORM_QKDAppStatusEnum), nullable=False)
+    app_type            = Column(Enum(ORM_QKDAppTypesEnum), nullable=False)
+    server_app_id       = Column(String, nullable=False)
+    client_app_id       = Column(ARRAY(String), nullable=False)
+    backing_qkdl_uuid   = Column(ARRAY(UUID(as_uuid=False)), nullable=False)
+    local_device_uuid   = Column(UUID(as_uuid=False), nullable=False)
+    remote_device_uuid  = Column(UUID(as_uuid=False), nullable=True)
+
+    # Optare: created_at and updated_at are only used to tell whether an app was updated later in the code. Do not change them.
+
+    created_at          = Column(DateTime, nullable=False)
+    updated_at          = Column(DateTime, nullable=False)
+
+    #__table_args__ = (
+    #    CheckConstraint(... >= 0, name='name_value_...'),
+    #)
+
+    def dump_id(self) -> Dict:
+        return {
+            'context_id': {'context_uuid': {'uuid': self.context_uuid}},
+            'app_uuid': {'uuid': self.app_uuid}
+        }
+
+    def dump(self) -> Dict:
+        result = {
+            'app_id'           : self.dump_id(),
+            'app_status'       : self.app_status.value,
+            'app_type'         : self.app_type.value,
+            'server_app_id'    : self.server_app_id,
+            'client_app_id'    : self.client_app_id,
+            'backing_qkdl_id'  : [{'qkdl_uuid': {'uuid': qkdl_id}} for qkdl_id in self.backing_qkdl_uuid],
+            'local_device_id'  : {'device_uuid': {'uuid': self.local_device_uuid}},
+            'remote_device_id' : {'device_uuid': {'uuid': self.remote_device_uuid}},
+        }
+        return result
diff --git a/src/qkd_app/service/database/models/_Base.py b/src/qkd_app/service/database/models/_Base.py
new file mode 100644
index 0000000000000000000000000000000000000000..51863e1d5c06a875c298eab726cfdc3b7fcb75ca
--- /dev/null
+++ b/src/qkd_app/service/database/models/_Base.py
@@ -0,0 +1,44 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sqlalchemy
+from typing import Any, List
+from sqlalchemy.orm import Session, sessionmaker, declarative_base
+from sqlalchemy.sql import text
+from sqlalchemy_cockroachdb import run_transaction
+
+_Base = declarative_base()
+
+'''
+def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None:
+    def index_storing(
+        index_name : str, table_name : str, index_fields : List[str], storing_fields : List[str]
+    ) -> Any:
+        str_index_fields = ','.join(['"{:s}"'.format(index_field) for index_field in index_fields])
+        str_storing_fields = ','.join(['"{:s}"'.format(storing_field) for storing_field in storing_fields])
+        INDEX_STORING = 'CREATE INDEX IF NOT EXISTS {:s} ON "{:s}" ({:s}) STORING ({:s});'
+        return text(INDEX_STORING.format(index_name, table_name, str_index_fields, str_storing_fields))
+
+    statements = [
+        # In case of relations
+    ]
+    def callback(session : Session) -> bool:
+        for stmt in statements: session.execute(stmt)
+    run_transaction(sessionmaker(bind=db_engine), callback)
+'''
+
+def rebuild_database(db_engine : sqlalchemy.engine.Engine, drop_if_exists : bool = False):
+    if drop_if_exists: _Base.metadata.drop_all(db_engine)
+    _Base.metadata.create_all(db_engine)
+    #create_performance_enhancers(db_engine)
diff --git a/src/qkd_app/service/database/models/__init__.py b/src/qkd_app/service/database/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..07d08814021ef82220611ee21c01ba01806682e9
--- /dev/null
+++ b/src/qkd_app/service/database/models/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/qkd_app/service/database/models/enums/QKDAppStatus.py b/src/qkd_app/service/database/models/enums/QKDAppStatus.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3063ef56704ce1bdd48d15ea8c6486ed7c8cfae
--- /dev/null
+++ b/src/qkd_app/service/database/models/enums/QKDAppStatus.py
@@ -0,0 +1,27 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, functools
+from common.proto.qkd_app_pb2 import QKDAppStatusEnum
+from ._GrpcToEnum import grpc_to_enum
+
+class ORM_QKDAppStatusEnum(enum.Enum):
+    ON           = QKDAppStatusEnum.QKDAPPSTATUS_ON
+    DISCONNECTED = QKDAppStatusEnum.QKDAPPSTATUS_DISCONNECTED
+    OUT_OF_TIME  = QKDAppStatusEnum.QKDAPPSTATUS_OUT_OF_TIME
+    ZOMBIE       = QKDAppStatusEnum.QKDAPPSTATUS_ZOMBIE
+
+
+grpc_to_enum__qkd_app_status = functools.partial(
+    grpc_to_enum, QKDAppStatusEnum, ORM_QKDAppStatusEnum)
diff --git a/src/qkd_app/service/database/models/enums/QKDAppTypes.py b/src/qkd_app/service/database/models/enums/QKDAppTypes.py
new file mode 100644
index 0000000000000000000000000000000000000000..f50b8982d80c0af97c2cbd96d336f450afc50f9b
--- /dev/null
+++ b/src/qkd_app/service/database/models/enums/QKDAppTypes.py
@@ -0,0 +1,25 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, functools
+from common.proto.qkd_app_pb2 import QKDAppTypesEnum
+from ._GrpcToEnum import grpc_to_enum
+
+class ORM_QKDAppTypesEnum(enum.Enum):
+    INTERNAL = QKDAppTypesEnum.QKDAPPTYPES_INTERNAL
+    CLIENT   = QKDAppTypesEnum.QKDAPPTYPES_CLIENT
+
+
+grpc_to_enum__qkd_app_types = functools.partial(
+    grpc_to_enum, QKDAppTypesEnum, ORM_QKDAppTypesEnum)
diff --git a/src/qkd_app/service/database/models/enums/_GrpcToEnum.py b/src/qkd_app/service/database/models/enums/_GrpcToEnum.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dcad039f7be240acbbec418d12475557e4c42c1
--- /dev/null
+++ b/src/qkd_app/service/database/models/enums/_GrpcToEnum.py
@@ -0,0 +1,38 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from enum import Enum
+from typing import Optional
+
+# Enumeration classes are redundant with gRPC classes, but gRPC does not provide a programmatic way to retrieve the
+# values it expects from strings containing the desired value symbol or its integer value, so a mapping of this kind
+# is required. Besides, ORM models expect Enum classes in enumerated fields; we create specific, conveniently defined
+# Enum classes to serve both purposes.
+
+def grpc_to_enum(grpc_enum_class, orm_enum_class : Enum, grpc_enum_value, grpc_enum_prefix : Optional[str] = None):
+    enum_name = grpc_enum_class.Name(grpc_enum_value)
+
+    if grpc_enum_prefix is None:
+        grpc_enum_prefix = orm_enum_class.__name__.upper()
+        #grpc_enum_prefix = re.sub(r'^ORM_(.+)$', r'\1', grpc_enum_prefix)
+        #grpc_enum_prefix = re.sub(r'^(.+)ENUM$', r'\1', grpc_enum_prefix)
+        #grpc_enum_prefix = grpc_enum_prefix + '_'
+        grpc_enum_prefix = re.sub(r'^ORM_(.+)ENUM$', r'\1_', grpc_enum_prefix)
+
+    if len(grpc_enum_prefix) > 0:
+        enum_name = enum_name.replace(grpc_enum_prefix, '')
+
+    orm_enum_value = orm_enum_class._member_map_.get(enum_name)
+    return orm_enum_value
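+
+# Illustrative only, using the classes defined in QKDAppStatus.py: the prefix is
+# derived from the ORM class name (ORM_QKDAppStatusEnum -> 'QKDAPPSTATUS_'), so:
+#   grpc_to_enum(QKDAppStatusEnum, ORM_QKDAppStatusEnum, QKDAppStatusEnum.QKDAPPSTATUS_ON)
+# returns ORM_QKDAppStatusEnum.ON.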
diff --git a/src/qkd_app/service/database/models/enums/__init__.py b/src/qkd_app/service/database/models/enums/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..07d08814021ef82220611ee21c01ba01806682e9
--- /dev/null
+++ b/src/qkd_app/service/database/models/enums/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/qkd_app/service/database/uuids/QKDApp.py b/src/qkd_app/service/database/uuids/QKDApp.py
new file mode 100644
index 0000000000000000000000000000000000000000..175f1d5f3cf4ceda12a022b4afadb376e11ae5a5
--- /dev/null
+++ b/src/qkd_app/service/database/uuids/QKDApp.py
@@ -0,0 +1,30 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.qkd_app_pb2 import AppId
+from common.method_wrappers.ServiceExceptions import InvalidArgumentsException
+from ._Builder import get_uuid_from_string, get_uuid_random
+
+def app_get_uuid(
+    app_id : AppId, allow_random : bool = False
+) -> str:
+    app_uuid = app_id.app_uuid.uuid
+
+    if len(app_uuid) > 0:
+        return get_uuid_from_string(app_uuid)
+    if allow_random: return get_uuid_random()
+
+    raise InvalidArgumentsException([
+        ('app_id.app_uuid.uuid', app_uuid),
+    ], extra_details=['At least one is required to produce an App UUID'])
diff --git a/src/qkd_app/service/database/uuids/_Builder.py b/src/qkd_app/service/database/uuids/_Builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..39c98de69d577ce2722693e57c4ee678124f9e30
--- /dev/null
+++ b/src/qkd_app/service/database/uuids/_Builder.py
@@ -0,0 +1,44 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional, Union
+from uuid import UUID, uuid4, uuid5
+
+# Generate a UUIDv5-like identifier from the SHA-1 of 'TFS' (with no namespace) to be used as the NAMESPACE for
+# all generated UUIDs. For efficiency, the UUID is hardcoded; it was produced using the following code:
+#    from hashlib import sha1
+#    from uuid import UUID
+#    hash = sha1(bytes('TFS', 'utf-8')).digest()
+#    NAMESPACE_TFS = UUID(bytes=hash[:16], version=5)
+NAMESPACE_TFS = UUID('200e3a1f-2223-534f-a100-758e29c37f40')
+
+def get_uuid_from_string(str_uuid_or_name : Union[str, UUID], prefix_for_name : Optional[str] = None) -> str:
+    # if a UUID object is given, assume it is already valid and just stringify it
+    if isinstance(str_uuid_or_name, UUID): return str(str_uuid_or_name)
+    if not isinstance(str_uuid_or_name, str):
+        MSG = 'Parameter({:s}) cannot be used to produce a UUID'
+        raise Exception(MSG.format(str(repr(str_uuid_or_name))))
+    try:
+        # try to parse as UUID
+        return str(UUID(str_uuid_or_name))
+    except: # pylint: disable=bare-except
+        # produce a UUID within TFS namespace from parameter
+        if prefix_for_name is not None:
+            str_uuid_or_name = '{:s}/{:s}'.format(prefix_for_name, str_uuid_or_name)
+        return str(uuid5(NAMESPACE_TFS, str_uuid_or_name))
+
+def get_uuid_random() -> str:
+    # Generate random UUID. No need to use namespace since "namespace + random = random".
+    return str(uuid4())
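+
+# Illustrative only: get_uuid_from_string() is deterministic for a given name:
+#   get_uuid_from_string('my-app')                        == str(uuid5(NAMESPACE_TFS, 'my-app'))
+#   get_uuid_from_string('my-app', prefix_for_name='ctx') == str(uuid5(NAMESPACE_TFS, 'ctx/my-app'))
+# while get_uuid_random() yields a fresh value on every call.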
diff --git a/src/qkd_app/service/rest_server/RestServer.py b/src/qkd_app/service/rest_server/RestServer.py
new file mode 100644
index 0000000000000000000000000000000000000000..e21531c5bcf0e1cf15a8f08952d6325a8349f398
--- /dev/null
+++ b/src/qkd_app/service/rest_server/RestServer.py
@@ -0,0 +1,23 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_baseurl_http, get_service_port_http
+from common.tools.service.GenericRestServer import GenericRestServer
+
+class RestServer(GenericRestServer):
+    def __init__(self, cls_name: str = __name__) -> None:
+        bind_port = get_service_port_http(ServiceNameEnum.QKD_APP)
+        base_url = get_service_baseurl_http(ServiceNameEnum.QKD_APP)
+        super().__init__(bind_port, base_url, cls_name=cls_name)
diff --git a/src/qkd_app/service/rest_server/__init__.py b/src/qkd_app/service/rest_server/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..07d08814021ef82220611ee21c01ba01806682e9
--- /dev/null
+++ b/src/qkd_app/service/rest_server/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/qkd_app/service/rest_server/qkd_app/Resources.py b/src/qkd_app/service/rest_server/qkd_app/Resources.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ba79d3940da91dfebc1a1c666893548caccbe6c
--- /dev/null
+++ b/src/qkd_app/service/rest_server/qkd_app/Resources.py
@@ -0,0 +1,86 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from flask import request
+from flask_restful import Resource
+from common.proto.context_pb2 import Empty
+from common.proto.qkd_app_pb2 import App, QKDAppTypesEnum
+from common.Constants import DEFAULT_CONTEXT_NAME
+from context.client.ContextClient import ContextClient
+from qkd_app.client.QKDAppClient import QKDAppClient
+
+
+class _Resource(Resource):
+    def __init__(self) -> None:
+        super().__init__()
+        self.context_client = ContextClient()
+        self.qkd_app_client = QKDAppClient()
+
+class Index(_Resource):
+    def get(self):
+        return {'hello': 'world'}
+
+class CreateQKDApp(_Resource):
+    # Optare: POST request for the QKD node to call TeraFlowSDN. Example requests are shown below.
+    def post(self):
+        app = request.get_json()['app']
+
+        devices = self.context_client.ListDevices(Empty())
+        devices = devices.devices
+
+        local_device = None
+
+        # This for-loop would not be necessary if we could guarantee that the Device ID equals the QKDN ID
+        for device in devices:
+            for config_rule in device.device_config.config_rules:
+                if config_rule.custom.resource_key == '__node__':
+                    value = json.loads(config_rule.custom.resource_value)
+                    qkdn_id = value['qkdn_id']
+                    if app['local_qkdn_id'] == qkdn_id:
+                        local_device = device
+                    break
+
+        # Optare: TODO: verify that a service is present for this app
+        '''
+        requests.post('http://10.211.36.220/app/create_qkd_app', json={'app': {'server_app_id':'1', 'client_app_id':[], 'app_status':'ON', 'local_qkdn_id':'00000001-0000-0000-0000-000000000000', 'backing_qkdl_id':['00000003-0002-0000-0000-000000000000']}})
+
+
+        requests.post('http://10.211.36.220/app/create_qkd_app', json={'app': {'server_app_id':'1', 'client_app_id':[], 'app_status':'ON', 'local_qkdn_id':'00000003-0000-0000-0000-000000000000', 'backing_qkdl_id':['00000003-0002-0000-0000-000000000000']}})
+        '''
+
+        if local_device is None:
+            return {"status": "fail"}
+
+        external_app_src_dst = {
+            'app_id': {'context_id': {'context_uuid': {'uuid': DEFAULT_CONTEXT_NAME}}, 'app_uuid': {'uuid': ''}},
+            'app_status': 'QKDAPPSTATUS_' + app['app_status'],
+            'app_type': QKDAppTypesEnum.QKDAPPTYPES_CLIENT,
+            'server_app_id': app['server_app_id'],
+            'client_app_id': app['client_app_id'],
+            'backing_qkdl_id': [{'qkdl_uuid': {'uuid': qkdl_id}} for qkdl_id in app['backing_qkdl_id']],
+            'local_device_id': local_device.device_id,
+            'remote_device_id': {'device_uuid': {'uuid': ''}},
+        }
+
+        # Optare: this calls our internal RegisterApp, which supports the creation of both internal and external apps.
+        # Optare: the check for whether two parties are requesting the same app is done inside RegisterApp.
+        self.qkd_app_client.RegisterApp(App(**external_app_src_dst))
+
+        # Optare: TODO: communicate via the SBI with both nodes of the new App
+
+        return {"status": "success"}
diff --git a/src/qkd_app/service/rest_server/qkd_app/__init__.py b/src/qkd_app/service/rest_server/qkd_app/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fc23b371414dcb2bac4afde63524febf71e5337
--- /dev/null
+++ b/src/qkd_app/service/rest_server/qkd_app/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from qkd_app.service.rest_server.RestServer import RestServer
+from .Resources import (
+    CreateQKDApp, Index)
+
+URL_PREFIX = '/qkd_app'
+
+# Use the 'path' converter since some identifiers may contain the '/' character, which Flask's 'string' converter does not accept.
+RESOURCES = [
+    # (endpoint_name, resource_class, resource_url)
+    ('api.index',            Index,        '/'),
+    ('api.register_qkd_app', CreateQKDApp, '/create_qkd_app'),
+]
+
+def register_qkd_app(app_server : RestServer):
+    for endpoint_name, resource_class, resource_url in RESOURCES:
+        app_server.add_resource(resource_class, URL_PREFIX + resource_url, endpoint=endpoint_name)
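+
+# Illustrative only: given the table above, register_qkd_app() exposes
+#   GET  <base_url>/qkd_app/                -> Index
+#   POST <base_url>/qkd_app/create_qkd_app  -> CreateQKDApp
+# where <base_url> is the HTTP base URL configured for the QKD_APP service.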
diff --git a/src/service/Dockerfile b/src/service/Dockerfile
index a847ae762d1303dda852d7f3d8200d3db3ef53f7..6f23f0a89387e2db3802ee20e003ccb6482844ff 100644
--- a/src/service/Dockerfile
+++ b/src/service/Dockerfile
@@ -70,6 +70,8 @@ COPY src/pathcomp/frontend/__init__.py pathcomp/frontend/__init__.py
 COPY src/pathcomp/frontend/client/. pathcomp/frontend/client/
 COPY src/e2e_orchestrator/__init__.py e2e_orchestrator/__init__.py
 COPY src/e2e_orchestrator/client/. e2e_orchestrator/client/
+COPY src/qkd_app/__init__.py qkd_app/__init__.py
+COPY src/qkd_app/client/. qkd_app/client/
 COPY src/service/. service/
 
 # Start the service
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index eb821972a6447a85990261586e656f2f365dda0d..45a8e0b6c0fd9a26b45fc47d55074d8863c0caed 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -30,6 +30,9 @@ from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_s
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Topology import json_topology_id
 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+from common.Settings import (
+    is_deployed_e2e_orch, is_deployed_optical, is_deployed_te
+)
 from context.client.ContextClient import ContextClient
 from e2e_orchestrator.client.E2EOrchestratorClient import E2EOrchestratorClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient
@@ -142,7 +145,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
             service.service_type = request.service_type                                     # pylint: disable=no-member
         service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED     # pylint: disable=no-member
 
-        if service.service_type == ServiceTypeEnum.SERVICETYPE_TE:
+        if is_deployed_te() and service.service_type == ServiceTypeEnum.SERVICETYPE_TE:
             # TE service:
             context_client.SetService(request)
 
@@ -164,7 +167,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
                 str_service_status = ServiceStatusEnum.Name(service_status.service_status)
                 raise Exception(MSG.format(service_key, str_service_status))
 
-        if service.service_type == ServiceTypeEnum.SERVICETYPE_E2E:
+        if is_deployed_e2e_orch() and service.service_type == ServiceTypeEnum.SERVICETYPE_E2E:
             # End-to-End service:
             service_id_with_uuids = context_client.SetService(request)
 
@@ -248,7 +251,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
 
         tasks_scheduler = TasksScheduler(self.service_handler_factory)
 
-        if service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY:
+        if is_deployed_optical() and service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY:
             context_id_x = json_context_id(DEFAULT_CONTEXT_NAME)
             topology_id_x = json_topology_id(
                 DEFAULT_TOPOLOGY_NAME, context_id_x)
@@ -341,14 +344,14 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
         service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL
         context_client.SetService(service)
 
-        if service.service_type == ServiceTypeEnum.SERVICETYPE_TE:
+        if is_deployed_te() and service.service_type == ServiceTypeEnum.SERVICETYPE_TE:
             # TE service
             te_service_client = TEServiceClient()
             te_service_client.DeleteLSP(request)
             context_client.RemoveService(request)
             return Empty()
 
-        if service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY:
+        if is_deployed_optical() and service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY:
             devs = []
 
             context_id_x = json_context_id(DEFAULT_CONTEXT_NAME)
diff --git a/src/service/service/service_handlers/qkd/qkd_service_handler.py b/src/service/service/service_handlers/qkd/qkd_service_handler.py
index 76c67867ee2f4bae60b8dd6e187f221f2efc1eb0..0977388005ef72fe036de93de2dc73438f0c6163 100644
--- a/src/service/service/service_handlers/qkd/qkd_service_handler.py
+++ b/src/service/service/service_handlers/qkd/qkd_service_handler.py
@@ -17,7 +17,7 @@ import json, logging, uuid
 from typing import Any, Dict, List, Optional, Tuple, Union
 from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
 from common.proto.context_pb2 import ConfigRule, DeviceId, Service
-from common.proto.app_pb2 import App, QKDAppStatusEnum, QKDAppTypesEnum
+from common.proto.qkd_app_pb2 import App, QKDAppStatusEnum, QKDAppTypesEnum
 from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
 from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py
index cd20faad23a06678be39dbacc476a0ea25d4d540..cb27993702963b4aac88ec04eca2a1c796d0c364 100644
--- a/src/service/service/task_scheduler/TaskExecutor.py
+++ b/src/service/service/task_scheduler/TaskExecutor.py
@@ -20,6 +20,7 @@ from common.proto.context_pb2 import (
     Connection, ConnectionId, Device, DeviceDriverEnum, DeviceId, Service, ServiceId,
     OpticalConfig, OpticalConfigId
 )
+from common.proto.qkd_app_pb2 import App
 from common.tools.context_queries.Connection import get_connection_by_id
 from common.tools.context_queries.Device import get_device
 from common.tools.context_queries.Service import get_service_by_id
@@ -27,11 +28,12 @@ from common.tools.grpc.Tools import grpc_message_to_json_string
 from common.tools.object_factory.Device import json_device_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
+from qkd_app.client.QKDAppClient import QKDAppClient
 from service.service.service_handler_api.Exceptions import (
     UnsatisfiedFilterException, UnsupportedFilterFieldException, UnsupportedFilterFieldValueException
 )
 from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory, get_service_handler_class
-from service.service.tools.ObjectKeys import get_connection_key, get_device_key, get_service_key
+from service.service.tools.ObjectKeys import get_connection_key, get_device_key, get_service_key, get_app_key
 
 if TYPE_CHECKING:
     from service.service.service_handler_api._ServiceHandler import _ServiceHandler
@@ -44,11 +46,14 @@ class CacheableObjectType(Enum):
     CONNECTION = 'connection'
     DEVICE     = 'device'
     SERVICE    = 'service'
+    QKD_APP    = 'qkd-app'
 
 class TaskExecutor:
     def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None:
         self._service_handler_factory = service_handler_factory
         self._context_client = ContextClient()
+        # DEPENDENCY QKD
+        self._qkd_app_client = QKDAppClient()
         self._device_client = DeviceClient()
         self._grpc_objects_cache : Dict[str, CacheableObject] = dict()
 
@@ -220,3 +225,12 @@ class TaskExecutor:
                     str(dict_connection_devices)
                 )
             )
+
+
+    # ----- QkdApp-related methods -------------------------------------------------------------------------------------
+
+    def register_app(self, app: App) -> None:
+        app_key = get_app_key(app.app_id)
+        self._qkd_app_client.RegisterApp(app)
+        LOGGER.info("reg registered")
+        self._store_grpc_object(CacheableObjectType.QKD_APP, app_key, app)
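+
+    # Illustrative only: a QKD service handler would typically call
+    #   task_executor.register_app(App(**app_descriptor))
+    # and the registered App is then cached under CacheableObjectType.QKD_APP.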
diff --git a/src/service/service/tools/ObjectKeys.py b/src/service/service/tools/ObjectKeys.py
index f45126e07df6a74a20e507fd51d08f1a32de7f98..cfc719bba736a4ea0789b028a97ca267b2d04089 100644
--- a/src/service/service/tools/ObjectKeys.py
+++ b/src/service/service/tools/ObjectKeys.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 from common.proto.context_pb2 import ConnectionId, DeviceId, ServiceId
+from common.proto.qkd_app_pb2 import AppId
 
 def get_connection_key(connection_id : ConnectionId) -> str:
     return connection_id.connection_uuid.uuid
@@ -24,3 +25,7 @@ def get_service_key(service_id : ServiceId) -> str:
     context_uuid = service_id.context_id.context_uuid.uuid
     service_uuid = service_id.service_uuid.uuid
     return '{:s}/{:s}'.format(context_uuid, service_uuid)
+
+def get_app_key(app_id : AppId) -> str:
+    return app_id.app_uuid.uuid
+
diff --git a/src/telemetry/.gitlab-ci.yml b/src/telemetry/.gitlab-ci.yml
index 48fd2f49384770acacbba551443c68e3e0950475..358a93af8d309c3ea8e80c9b905792763ad51de8 100644
--- a/src/telemetry/.gitlab-ci.yml
+++ b/src/telemetry/.gitlab-ci.yml
@@ -69,6 +69,7 @@ unit_test telemetry-backend:
     - docker pull "bitnami/kafka:latest"
     - >
       docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      --env ALLOW_ANONYMOUS_LOGIN=yes
       bitnami/zookeeper:latest
     - sleep 10 # Wait for Zookeeper to start
     - >
@@ -94,12 +95,12 @@ unit_test telemetry-backend:
     - docker exec -i ${IMAGE_NAME}-backend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
+    - docker rm -f ${IMAGE_NAME}-backend
+    - docker rm -f kafka
+    - docker rm -f zookeeper
     - docker network rm teraflowbridge
     - docker volume prune --force
     - docker image prune --force
-    - docker rm -f ${IMAGE_NAME}-backend
-    - docker rm -f zookeeper
-    - docker rm -f kafka
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
     - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
@@ -151,19 +152,20 @@ unit_test telemetry-frontend:
     - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
     - echo $CRDB_ADDRESS
     - >
-      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181 \
-      -e ALLOW_ANONYMOUS_LOGIN=yes \
+      docker run --name zookeeper -d --network=teraflowbridge -p 2181:2181
+      --env ALLOW_ANONYMOUS_LOGIN=yes
       bitnami/zookeeper:latest
     - sleep 10 # Wait for Zookeeper to start
-    - docker run --name kafka -d --network=teraflowbridge -p 9092:9092
+    - >
+      docker run --name kafka -d --network=teraflowbridge -p 9092:9092
       --env KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
       --env ALLOW_PLAINTEXT_LISTENER=yes
       bitnami/kafka:latest
     - sleep 20 # Wait for Kafka to start
     - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
     - echo $KAFKA_IP
-    # - docker logs zookeeper
-    # - docker logs kafka
+    - docker logs zookeeper
+    - docker logs kafka
     - >
       docker run --name $IMAGE_NAME-frontend -d -p 30050:30050
       --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
@@ -180,13 +182,13 @@ unit_test telemetry-frontend:
     - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
   coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
   after_script:
-    - docker volume rm -f crdb
-    - docker network rm teraflowbridge
-    - docker volume prune --force
-    - docker image prune --force
     - docker rm -f ${IMAGE_NAME}-frontend
     - docker rm -f zookeeper
     - docker rm -f kafka
+    - docker volume rm -f crdb
+    - docker volume prune --force
+    - docker image prune --force
+    - docker network rm teraflowbridge
   rules:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
     - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
@@ -201,4 +203,4 @@ unit_test telemetry-frontend:
   artifacts:
       when: always
       reports:
-        junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml
\ No newline at end of file
+        junit: src/$IMAGE_NAME/frontend/tests/${IMAGE_NAME}-frontend_report.xml
diff --git a/src/telemetry/database/TelemetryEngine.py b/src/telemetry/database/TelemetryEngine.py
deleted file mode 100644
index 7c8620faf25e695e7f971bce78be9ad208a7701b..0000000000000000000000000000000000000000
--- a/src/telemetry/database/TelemetryEngine.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, sqlalchemy
-from common.Settings import get_setting
-
-LOGGER = logging.getLogger(__name__)
-CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
-
-class TelemetryEngine:
-    @staticmethod
-    def get_engine() -> sqlalchemy.engine.Engine:
-        crdb_uri = get_setting('CRDB_URI', default=None)
-        if crdb_uri is None:
-            CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE')
-            CRDB_SQL_PORT  = get_setting('CRDB_SQL_PORT')
-            CRDB_DATABASE  = "tfs-telemetry"             # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT')
-            CRDB_USERNAME  = get_setting('CRDB_USERNAME')
-            CRDB_PASSWORD  = get_setting('CRDB_PASSWORD')
-            CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
-            crdb_uri = CRDB_URI_TEMPLATE.format(
-                CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
-        try:
-            engine = sqlalchemy.create_engine(crdb_uri, echo=False)
-            LOGGER.info(' TelemetryDB initalized with DB URL: {:}'.format(crdb_uri))
-        except: # pylint: disable=bare-except # pragma: no cover
-            LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
-            return None # type: ignore
-        return engine
diff --git a/src/telemetry/database/Telemetry_DB.py b/src/telemetry/database/Telemetry_DB.py
index 32acfd73a410a7bfddd6b487d0b1962afadb3842..110c7e80a4c36eed15417bfa05c4057ccb7fe292 100644
--- a/src/telemetry/database/Telemetry_DB.py
+++ b/src/telemetry/database/Telemetry_DB.py
@@ -13,125 +13,32 @@
 # limitations under the License.
 
 import logging
-import sqlalchemy_utils
-from sqlalchemy import inspect
-from sqlalchemy.orm import sessionmaker
-from telemetry.database.TelemetryModel import Collector as CollectorModel
-from telemetry.database.TelemetryEngine import TelemetryEngine
-from common.method_wrappers.ServiceExceptions import (
-    OperationFailedException, AlreadyExistsException )
+from common.method_wrappers.Decorator import MetricsPool
+from common.tools.database.GenericDatabase import Database
+from common.method_wrappers.ServiceExceptions import OperationFailedException
 
-LOGGER = logging.getLogger(__name__)
-DB_NAME = "tfs_telemetry"
+LOGGER       = logging.getLogger(__name__)
+METRICS_POOL = MetricsPool('TelemetryFrontend', 'Database')
 
-class TelemetryDB:
-    def __init__(self):
-        self.db_engine = TelemetryEngine.get_engine()
-        if self.db_engine is None:
-            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
-            return False
-        self.db_name = DB_NAME
-        self.Session = sessionmaker(bind=self.db_engine)
-
-    def create_database(self):
-        if not sqlalchemy_utils.database_exists(self.db_engine.url):
-            LOGGER.debug("Database created. {:}".format(self.db_engine.url))
-            sqlalchemy_utils.create_database(self.db_engine.url)
-
-    def drop_database(self) -> None:
-        if sqlalchemy_utils.database_exists(self.db_engine.url):
-            sqlalchemy_utils.drop_database(self.db_engine.url)
-
-    def create_tables(self):
-        try:
-            CollectorModel.metadata.create_all(self.db_engine)     # type: ignore
-            LOGGER.debug("Tables created in the database: {:}".format(self.db_name))
-        except Exception as e:
-            LOGGER.debug("Tables cannot be created in the database. {:s}".format(str(e)))
-            raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)])
-
-    def verify_tables(self):
-        try:
-            inspect_object = inspect(self.db_engine)
-            if(inspect_object.has_table('collector', None)):
-                LOGGER.info("Table exists in DB: {:}".format(self.db_name))
-        except Exception as e:
-            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
-
-# ----------------- CURD METHODs ---------------------
-
-    def add_row_to_db(self, row):
-        session = self.Session()
-        try:
-            session.add(row)
-            session.commit()
-            LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.")
-            return True
-        except Exception as e:
-            session.rollback()
-            if "psycopg2.errors.UniqueViolation" in str(e):
-                LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}")
-                raise AlreadyExistsException(row.__class__.__name__, row,
-                                             extra_details=["Unique key voilation: {:}".format(e)] )
-            else:
-                LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
-                raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
-        finally:
-            session.close()
-    
-    def search_db_row_by_id(self, model, col_name, id_to_search):
-        session = self.Session()
-        try:
-            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
-            if entity:
-                # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}")
-                return entity
-            else:
-                LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}")
-                print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search))
-                return None
-        except Exception as e:
-            session.rollback()
-            LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}")
-            raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)])
-        finally:
-            session.close()
-    
-    def delete_db_row_by_id(self, model, col_name, id_to_search):
-        session = self.Session()
-        try:
-            record = session.query(model).filter_by(**{col_name: id_to_search}).first()
-            if record:
-                session.delete(record)
-                session.commit()
-                LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search)
-            else:
-                LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search)
-                return None
-        except Exception as e:
-            session.rollback()
-            LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e)
-            raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
-        finally:
-            session.close()
+class TelemetryDB(Database):
+    def __init__(self, model) -> None:
+        LOGGER.info('Init TelemetryDB')
+        super().__init__(model)
     
     def select_with_filter(self, model, filter_object):
+        """
+        Generic method to create filters dynamically based on filter_object attributes.
+        params:     model:         SQLAlchemy model class to query.
+                    filter_object: Object that contains filtering criteria as attributes.
+        return:     SQLAlchemy session, query and Model
+        """
         session = self.Session()
         try:
-            query = session.query(CollectorModel)
-            # Apply filters based on the filter_object
+            query = session.query(model)
             if filter_object.kpi_id:
-                query = query.filter(CollectorModel.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id]))     
-            result = query.all()
-            # query should be added to return all rows
-            if result:
-                LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") #  - Results: {result}
-            else:
-                LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filter_object}")
-            return result
+                query = query.filter(model.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id]))     
         except Exception as e:
-            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}")
-            raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)])
-        finally:
-            session.close()
-
+            LOGGER.error(f"Error creating filter of {model.__name__} table. ERROR: {e}")
+            raise OperationFailedException ("CreateKpiDescriptorFilter", extra_details=["unable to create the filter {:}".format(e)]) 
+        
+        return super().select_with_filter(query, session, model)
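+
+# Illustrative only: the Telemetry frontend is expected to call, e.g.,
+#   tele_db_obj.select_with_filter(CollectorModel, filter_object)
+# where filter_object carries a repeated 'kpi_id' field (as in the gRPC filter messages).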
diff --git a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
index 746790bf68fd3d843850fc96db526dcefad59283..ad99dff12dc641232972f8cff8226878caefd71b 100644
--- a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
+++ b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
@@ -40,7 +40,7 @@ ACTIVE_COLLECTORS = []       # keep and can be populated from DB
 class TelemetryFrontendServiceServicerImpl(TelemetryFrontendServiceServicer):
     def __init__(self):
         LOGGER.info('Init TelemetryFrontendService')
-        self.tele_db_obj = TelemetryDB()
+        self.tele_db_obj = TelemetryDB(CollectorModel)
         self.kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()})
         self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(),
                                             'group.id'           : 'frontend',
diff --git a/src/telemetry/frontend/service/__main__.py b/src/telemetry/frontend/service/__main__.py
index 126e63b27d451721561e68b16f879cf001b23cca..58c622fc3da5906ba557cff829bb15965c164b34 100644
--- a/src/telemetry/frontend/service/__main__.py
+++ b/src/telemetry/frontend/service/__main__.py
@@ -16,6 +16,8 @@ import logging, signal, sys, threading
 from prometheus_client import start_http_server
 from common.Settings import get_log_level, get_metrics_port
 from .TelemetryFrontendService import TelemetryFrontendService
+from telemetry.database.TelemetryModel import Collector as Model
+from common.tools.database.GenericDatabase import Database
 
 terminate = threading.Event()
 LOGGER = None
@@ -36,6 +38,11 @@ def main():
 
     LOGGER.info('Starting...')
 
+    # Create the Telemetry database and tables if they do not exist
+    telemetryDBobj = Database(Model)
+    telemetryDBobj.create_database()
+    telemetryDBobj.create_tables()
+
     # Start metrics server
     # metrics_port = get_metrics_port()
     # start_http_server(metrics_port)
diff --git a/src/telemetry/tests/test_telemetryDB.py b/src/telemetry/tests/test_telemetryDB.py
index c4976f8c2144fcdcad43a3e25d43091010de0d18..1b122e4bca266018c01044e2eb8a1ab277b3e3c3 100644
--- a/src/telemetry/tests/test_telemetryDB.py
+++ b/src/telemetry/tests/test_telemetryDB.py
@@ -21,8 +21,8 @@ LOGGER = logging.getLogger(__name__)
 def test_verify_databases_and_tables():
     LOGGER.info('>>> test_verify_databases_and_tables : START <<< ')
     TelemetryDBobj = TelemetryDB()
-    TelemetryDBobj.drop_database()
-    TelemetryDBobj.verify_tables()
+    # TelemetryDBobj.drop_database()
+    # TelemetryDBobj.verify_tables()
     TelemetryDBobj.create_database()
     TelemetryDBobj.create_tables()
-    TelemetryDBobj.verify_tables()
\ No newline at end of file
+    TelemetryDBobj.verify_tables()
diff --git a/src/tests/tools/mock_qkd_nodes/start.sh b/src/tests/tools/mock_qkd_nodes/start.sh
index b1bc56d5a7f90809e81c73a54803fb2dc11bacd9..faf2f84baf61f16565b497b53bf5f41f45007c00 100755
--- a/src/tests/tools/mock_qkd_nodes/start.sh
+++ b/src/tests/tools/mock_qkd_nodes/start.sh
@@ -23,8 +23,8 @@ killbg() {
 
 trap killbg EXIT
 pids=()
-flask --app mock run --host 0.0.0.0 --port 11111 & 
+flask run --host 0.0.0.0 --port 11111 & 
 pids+=($!)
-flask --app mock run --host 0.0.0.0 --port 22222 & 
+flask run --host 0.0.0.0 --port 22222 & 
 pids+=($!)
-flask --app mock run --host 0.0.0.0 --port 33333
+flask run --host 0.0.0.0 --port 33333
diff --git a/src/tests/tools/mock_qkd_nodes/mock.py b/src/tests/tools/mock_qkd_nodes/wsgi.py
similarity index 97%
rename from src/tests/tools/mock_qkd_nodes/mock.py
rename to src/tests/tools/mock_qkd_nodes/wsgi.py
index 7a606f6cac855fee9852f620c595908fbb3d36da..3f8847849337fbfb1a9f84c783786218db4fb04d 100644
--- a/src/tests/tools/mock_qkd_nodes/mock.py
+++ b/src/tests/tools/mock_qkd_nodes/wsgi.py
@@ -23,7 +23,7 @@ yang_validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types'])
 
 
 nodes = {
-    '127.0.0.1:11111': {'node': {
+    '10.0.2.10:11111': {'node': {
             'qkdn_id': '00000001-0000-0000-0000-000000000000',
         },
         'qkdn_capabilities': {
@@ -54,7 +54,7 @@ nodes = {
                 {
                     'qkdi_id': '101',
                     'qkdi_att_point': {
-                        'device':'127.0.0.1',
+                        'device':'10.0.2.10',
                         'port':'1001'
                     },
                     'qkdi_capabilities': {
@@ -69,7 +69,7 @@ nodes = {
         }
     },
 
-    '127.0.0.1:22222': {'node': {
+    '10.0.2.10:22222': {'node': {
             'qkdn_id': '00000002-0000-0000-0000-000000000000',
         },
         'qkdn_capabilities': {
@@ -100,7 +100,7 @@ nodes = {
                 {
                     'qkdi_id': '201',
                     'qkdi_att_point': {
-                        'device':'127.0.0.1',
+                        'device':'10.0.2.10',
                         'port':'2001'
                     },
                     'qkdi_capabilities': {
@@ -109,7 +109,7 @@ nodes = {
                 {
                     'qkdi_id': '202',
                     'qkdi_att_point': {
-                        'device':'127.0.0.1',
+                        'device':'10.0.2.10',
                         'port':'2002'
                     },
                     'qkdi_capabilities': {
@@ -124,7 +124,7 @@ nodes = {
         }
     },
 
-    '127.0.0.1:33333': {'node': {
+    '10.0.2.10:33333': {'node': {
             'qkdn_id': '00000003-0000-0000-0000-000000000000',
         },
         'qkdn_capabilities': {
@@ -155,7 +155,7 @@ nodes = {
                 {
                     'qkdi_id': '301',
                     'qkdi_att_point': {
-                        'device':'127.0.0.1',
+                        'device':'10.0.2.10',
                         'port':'3001'
                     },
                     'qkdi_capabilities': {
diff --git a/src/webui/Dockerfile b/src/webui/Dockerfile
index 55e67b670f36812a55cf60e411cf137bc5b8a2ee..8295087667a6a25b209c584fc9627a5457f47f4d 100644
--- a/src/webui/Dockerfile
+++ b/src/webui/Dockerfile
@@ -84,9 +84,11 @@ COPY --chown=webui:webui src/service/__init__.py service/__init__.py
 COPY --chown=webui:webui src/service/client/. service/client/
 COPY --chown=webui:webui src/slice/__init__.py slice/__init__.py
 COPY --chown=webui:webui src/slice/client/. slice/client/
-COPY --chown=webui:webui src/webui/. webui/
+COPY --chown=webui:webui src/qkd_app/__init__.py qkd_app/__init__.py
+COPY --chown=webui:webui src/qkd_app/client/. qkd_app/client/
 COPY --chown=webui:webui src/bgpls_speaker/__init__.py bgpls_speaker/__init__.py
 COPY --chown=webui:webui src/bgpls_speaker/client/. bgpls_speaker/client/
+COPY --chown=webui:webui src/webui/. webui/
 
 # Start the service
 ENTRYPOINT ["python", "-m", "webui.service"]
diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py
index b864d3549e051b54e888c80547724da14fec5f67..f137c247e1c4d2f4be5707e72e4140ab75f8e886 100644
--- a/src/webui/service/__init__.py
+++ b/src/webui/service/__init__.py
@@ -19,6 +19,10 @@ from flask_healthz import healthz, HealthError
 from common.tools.grpc.Tools import grpc_message_to_json
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
+from qkd_app.client.QKDAppClient import QKDAppClient
+from common.Settings import (
+    is_deployed_bgpls, is_deployed_load_gen, is_deployed_policy, is_deployed_qkd_app, is_deployed_slice
+)
 
 def get_working_context() -> str:
     return session['context_uuid'] if 'context_uuid' in session else '---'
@@ -37,6 +41,10 @@ def readiness():
         device_client = DeviceClient()
         device_client.connect()
         device_client.close()
+        # DEPENDENCY QKD
+        qkd_app_client = QKDAppClient()
+        qkd_app_client.connect()
+        qkd_app_client.close()
     except Exception as e:
         raise HealthError("Can't connect with the service: {:s}".format(str(e))) from e
 
@@ -78,30 +86,33 @@ def create_app(use_config=None, web_app_root=None):
     
     app.register_blueprint(healthz, url_prefix='/healthz')
 
-    from webui.service.js.routes import js                  # pylint: disable=import-outside-toplevel
+    from webui.service.js.routes import js                   # pylint: disable=import-outside-toplevel
     app.register_blueprint(js)
 
-    from webui.service.main.routes import main              # pylint: disable=import-outside-toplevel
+    from webui.service.main.routes import main               # pylint: disable=import-outside-toplevel
     app.register_blueprint(main)
 
-    from webui.service.load_gen.routes import load_gen      # pylint: disable=import-outside-toplevel
+    from webui.service.load_gen.routes import load_gen       # pylint: disable=import-outside-toplevel
     app.register_blueprint(load_gen)
 
-    from webui.service.service.routes import service        # pylint: disable=import-outside-toplevel
+    from webui.service.service.routes import service         # pylint: disable=import-outside-toplevel
     app.register_blueprint(service)
 
-    from webui.service.slice.routes import slice            # pylint: disable=import-outside-toplevel,redefined-builtin
+    from webui.service.slice.routes import slice             # pylint: disable=import-outside-toplevel,redefined-builtin
     app.register_blueprint(slice)
 
-    from webui.service.device.routes import device          # pylint: disable=import-outside-toplevel
+    from webui.service.device.routes import device           # pylint: disable=import-outside-toplevel
     app.register_blueprint(device)
     
-    from webui.service.bgpls.routes import bgpls          # pylint: disable=import-outside-toplevel
+    from webui.service.bgpls.routes import bgpls             # pylint: disable=import-outside-toplevel
     app.register_blueprint(bgpls)
 
-    from webui.service.link.routes import link              # pylint: disable=import-outside-toplevel
+    from webui.service.link.routes import link               # pylint: disable=import-outside-toplevel
     app.register_blueprint(link)
 
+    from webui.service.qkd_app.routes import qkd_app         # pylint: disable=import-outside-toplevel
+    app.register_blueprint(qkd_app)
+
     from webui.service.policy_rule.routes import policy_rule # pylint: disable=import-outside-toplevel
     app.register_blueprint(policy_rule)
 
@@ -112,6 +123,12 @@ def create_app(use_config=None, web_app_root=None):
         'round'               : round,
         'get_working_context' : get_working_context,
         'get_working_topology': get_working_topology,
+
+        'is_deployed_bgpls'   : is_deployed_bgpls,
+        'is_deployed_load_gen': is_deployed_load_gen,
+        'is_deployed_policy'  : is_deployed_policy,
+        'is_deployed_qkd_app' : is_deployed_qkd_app,
+        'is_deployed_slice'   : is_deployed_slice,
     })
 
     if web_app_root is not None:
diff --git a/src/webui/service/__main__.py b/src/webui/service/__main__.py
index e9a906e8a431e287911547abc4065d9d9364ccb4..109c468c7daeda22efedf75a7293c6d8c6f038d8 100644
--- a/src/webui/service/__main__.py
+++ b/src/webui/service/__main__.py
@@ -33,6 +33,7 @@ def main():
     logging.basicConfig(level=log_level)
     logger = logging.getLogger(__name__)
 
+    # DEPENDENCY QKD
     wait_for_environment_variables([
         get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     ),
         get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
@@ -40,8 +41,6 @@ def main():
         get_env_var_name(ServiceNameEnum.DEVICE,  ENVVAR_SUFIX_SERVICE_PORT_GRPC),
         get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_HOST     ),
         get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
-        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_HOST     ),
-        get_env_var_name(ServiceNameEnum.SLICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC),
     ])
 
     logger.info('Starting...')
diff --git a/src/webui/service/device/forms.py b/src/webui/service/device/forms.py
index e4c71d92170dc9fe46996a1c93978647800aa300..eebc06755f204cd270ff8feca21733cb4426493a 100644
--- a/src/webui/service/device/forms.py
+++ b/src/webui/service/device/forms.py
@@ -33,6 +33,7 @@ class AddDeviceForm(FlaskForm):
     device_drivers_gnmi_openconfig = BooleanField('GNMI OPENCONFIG')
     device_drivers_optical_tfs = BooleanField('OPTICAL TFS')
     device_drivers_ietf_actn = BooleanField('IETF ACTN')
+    device_drivers_qkd = BooleanField('QKD')
 
     device_config_address = StringField('connect/address',default='127.0.0.1',validators=[DataRequired(), Length(min=5)])
     device_config_port = StringField('connect/port',default='0',validators=[DataRequired(), Length(min=1)])
@@ -57,3 +58,4 @@ class UpdateDeviceForm(FlaskForm):
                            validators=[NumberRange(min=0)])
                         
     submit = SubmitField('Update')
+    
diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py
index b7fdb78e85dc634627de02947c0861a7f13bdae9..429f4a2ea8539b7b12baf5e20eb30760694ede64 100644
--- a/src/webui/service/device/routes.py
+++ b/src/webui/service/device/routes.py
@@ -129,6 +129,8 @@ def add():
             device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS)
         if form.device_drivers_ietf_actn.data:
             device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN)
+        if form.device_drivers_qkd.data:
+            device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_QKD)
         device_obj.device_drivers.extend(device_drivers) # pylint: disable=no-member
 
         try:
diff --git a/src/webui/service/qkd_app/__init__.py b/src/webui/service/qkd_app/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/webui/service/qkd_app/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/webui/service/qkd_app/routes.py b/src/webui/service/qkd_app/routes.py
new file mode 100644
index 0000000000000000000000000000000000000000..71243fb75e552ec5568eedacdcadabbc39516b4e
--- /dev/null
+++ b/src/webui/service/qkd_app/routes.py
@@ -0,0 +1,113 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, json, logging
+
+from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for
+from common.proto.context_pb2 import Empty, Link, LinkId, LinkList
+from common.proto.qkd_app_pb2 import App, QKDAppStatusEnum, QKDAppTypesEnum
+from common.tools.context_queries.Context import get_context
+from common.tools.context_queries.Device import get_device
+from common.tools.context_queries.Topology import get_topology
+from context.client.ContextClient import ContextClient
+from qkd_app.client.QKDAppClient import QKDAppClient
+
+
+LOGGER = logging.getLogger(__name__)
+qkd_app = Blueprint('qkd_app', __name__, url_prefix='/qkd_app')
+
+qkd_app_client = QKDAppClient()
+context_client = ContextClient()
+
+@qkd_app.get('/')
+def home():
+    if 'context_uuid' not in session or 'topology_uuid' not in session:
+        flash("Please select a context!", "warning")
+        return redirect(url_for("main.home"))
+    context_uuid = session['context_uuid']
+    topology_uuid = session['topology_uuid']
+
+    context_client.connect()
+    device_names = dict()
+
+    context_obj = get_context(context_client, context_uuid, rw_copy=False)
+    if context_obj is None:
+        flash('Context({:s}) not found'.format(str(context_uuid)), 'danger')
+        apps = list()
+    else:
+        try:
+            apps = qkd_app_client.ListApps(context_obj.context_id)
+            apps = apps.apps
+        except grpc.RpcError as e:
+            if e.code() != grpc.StatusCode.NOT_FOUND: raise
+            if e.details() != 'Context({:s}) not found'.format(context_uuid): raise
+            apps = list()
+        else:
+            # NOTE: this issues one context_client request per app; optimize in the future if the number of apps grows
+            for app in apps:
+                if app.local_device_id.device_uuid.uuid not in device_names:
+                    device = get_device(context_client, app.local_device_id.device_uuid.uuid)
+                    if device is not None:
+                        device_names[app.local_device_id.device_uuid.uuid] = device.name
+                
+                if app.remote_device_id.device_uuid.uuid and app.remote_device_id.device_uuid.uuid not in device_names:
+                    device = get_device(context_client, app.remote_device_id.device_uuid.uuid)
+                    if device is not None:
+                        device_names[app.remote_device_id.device_uuid.uuid] = device.name
+
+    context_client.close()
+    return render_template(
+        'qkd_app/home.html', apps=apps, device_names=device_names, ate=QKDAppTypesEnum, ase=QKDAppStatusEnum)
+
+
+@qkd_app.route('detail/<path:app_uuid>', methods=('GET', 'POST'))
+def detail(app_uuid: str):
+    # TODO: implement the App detail view. The commented-out block below is the
+    # Link detail route, kept as a reference for the eventual implementation.
+    '''
+    context_client.connect()
+    link_obj = get_link(context_client, link_uuid, rw_copy=False)
+    if link_obj is None:
+        flash('Link({:s}) not found'.format(str(link_uuid)), 'danger')
+        link_obj = Link()
+        device_names, endpoints_data = dict(), dict()
+    else:
+        device_names, endpoints_data = get_endpoint_names(context_client, link_obj.link_endpoint_ids)
+    context_client.close()
+    return render_template('link/detail.html', link=link_obj, device_names=device_names, endpoints_data=endpoints_data)
+    '''
+    flash('App detail view not implemented yet', 'warning')
+    return redirect(url_for('qkd_app.home'))
+
+@qkd_app.get('<path:app_uuid>/delete')
+def delete(app_uuid):
+    # TODO: implement App removal. The commented-out block below is the Link
+    # delete route, kept as a reference for the eventual implementation.
+    '''
+    try:
+        request = LinkId()
+        request.link_uuid.uuid = link_uuid # pylint: disable=no-member
+        context_client.connect()
+        context_client.RemoveLink(request)
+        context_client.close()
+        flash(f'Link "{link_uuid}" deleted successfully!', 'success')
+    except Exception as e: # pylint: disable=broad-except
+        flash(f'Problem deleting link "{link_uuid}": {e.details()}', 'danger')
+        current_app.logger.exception(e)
+    return redirect(url_for('link.home'))
+    '''
+    flash('App removal not implemented yet', 'warning')
+    return redirect(url_for('qkd_app.home'))
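Note on the `home()` view above: it issues one `get_device()` lookup per distinct device, which the inline comment flags for future optimization. A possible batch alternative, sketched here for illustration only (it is not part of this patch), is to list all devices once and index them by UUID; `ListDevices(Empty())` is used the same way in `service/routes.py` further below.

```python
# Sketch only: build a uuid -> name map with a single RPC instead of one
# get_device() call per app. All identifiers used here appear in this patch.
from common.proto.context_pb2 import Empty
from context.client.ContextClient import ContextClient

def build_device_name_map(context_client: ContextClient) -> dict:
    grpc_devices = context_client.ListDevices(Empty())
    return {
        device.device_id.device_uuid.uuid: device.name
        for device in grpc_devices.devices
    }
```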
diff --git a/src/webui/service/service/forms.py b/src/webui/service/service/forms.py
index f07acf54365b79245583e7f9567b8bc4a5cfd89d..dad15f1c2dbef3a5d1c9a3ecdc6f96c00b883aa2 100644
--- a/src/webui/service/service/forms.py
+++ b/src/webui/service/service/forms.py
@@ -17,6 +17,13 @@ from flask_wtf import FlaskForm
 from wtforms import StringField, SelectField, IntegerField, DecimalField
 from wtforms.validators import InputRequired, Optional, NumberRange, ValidationError, StopValidation
 
+import re
+
+# Custom UUID validator
+def validate_uuid_address(form, field):
+    if not re.match(r'^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$', field.data):
+        raise ValidationError('Invalid UUID format')
+
 # Custom IPv4 address validator
 def validate_ipv4_address(form, field):
     try:
@@ -60,7 +67,25 @@
             raise StopValidation(self.message)
         
 class AddServiceForm_1(FlaskForm):
-    service_type = SelectField('Type of service', choices=[('', 'Select a type of service to add'), ('ACL_L2', 'ACL_L2'), ('ACL_IPV4', 'ACL_IPV4'), ('ACL_IPV6', 'ACL_IPV6'), ('L2VPN', 'L2VPN'), ('L3VPN', 'L3VPN')], validators=[InputRequired()])
+    service_type = SelectField('Type of service', choices=[('', 'Select a type of service to add'), ('ACL_L2', 'ACL_L2'), ('ACL_IPV4', 'ACL_IPV4'), ('ACL_IPV6', 'ACL_IPV6'), ('L2VPN', 'L2VPN'), ('L3VPN', 'L3VPN'), ('QKD', 'QKD')], validators=[InputRequired()])
+
+class AddServiceForm_QKD(FlaskForm):
+    #GENERIC SERVICE PARAMETERS (COMMON & MANDATORY)
+    service_name       = StringField('Service Name', validators=[CustomInputRequired()])
+    service_type       = SelectField('Service Type', coerce=int, choices=[(6, '6 (QKD)')], validators=[CustomInputRequired()])
+    service_device_1   = SelectField('Device_1', choices=[('', 'Select a device (Mandatory)')], validators=[CustomInputRequired()])
+    service_device_2   = SelectField('Device_2', choices=[('', 'Select a device (Mandatory)')], validators=[CustomInputRequired()])
+    service_endpoint_1 = StringField('Device_1 Endpoint', validators=[CustomInputRequired()])
+    service_endpoint_2 = StringField('Device_2 Endpoint', validators=[CustomInputRequired()])
+
+    #GENERIC SERVICE CONSTRAINT PARAMETERS (ALL OPTIONAL)
+    service_capacity     = DecimalField('Service Capacity', places=2, default=10.00, validators=[Optional(), NumberRange(min=0)])
+    service_latency      = DecimalField('Service Latency', places=2, default=15.20, validators=[Optional(), NumberRange(min=0)])
+    service_availability = DecimalField('Service Availability', places=2, validators=[Optional(), NumberRange(min=0)])
+    service_isolation    = SelectField('Service Isolation', choices=[('', 'Select (Optional)'), ('NO_ISOLATION', 'NO_ISOLATION'), ('PHYSICAL_ISOLATION', 'PHYSICAL_ISOLATION'),
+                                                                     ('LOGICAL_ISOLATION', 'LOGICAL_ISOLATION'), ('PROCESS_ISOLATION', 'PROCESS_ISOLATION'), ('PHYSICAL_MEMORY_ISOLATION', 'PHYSICAL_MEMORY_ISOLATION'),
+                                                                     ('PHYSICAL_NETWORK_ISOLATION', 'PHYSICAL_NETWORK_ISOLATION'), ('VIRTUAL_RESOURCE_ISOLATION', 'VIRTUAL_RESOURCE_ISOLATION'),
+                                                                     ('NETWORK_FUNCTIONS_ISOLATION', 'NETWORK_FUNCTIONS_ISOLATION'), ('SERVICE_ISOLATION', 'SERVICE_ISOLATION')], validators=[Optional()])
 
 class AddServiceForm_ACL_L2(FlaskForm):
     #GENERIC SERVICE PARAMETERS (COMMON & MANDATORY)
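For comparison with `validate_uuid_address` above: the standard library can perform the same check, although `uuid.UUID()` also accepts braced, URN-prefixed, and dash-free spellings, so the regex is the stricter variant. A minimal sketch (the function name is hypothetical, not part of this patch):

```python
import uuid
from wtforms.validators import ValidationError

def validate_uuid_stdlib(form, field):
    # uuid.UUID() raises ValueError for anything it cannot parse as a UUID
    try:
        uuid.UUID(str(field.data))
    except ValueError:
        raise ValidationError('Invalid UUID format')
```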
diff --git a/src/webui/service/service/routes.py b/src/webui/service/service/routes.py
index 92025b2bec4f7c70b446a8c422e2cdb166c95466..c164b41773e15ac4e9746753e1fdc3b56a51b0d2 100644
--- a/src/webui/service/service/routes.py
+++ b/src/webui/service/service/routes.py
@@ -35,14 +35,14 @@ from common.tools.object_factory.Constraint import (
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.EndPoint import json_endpoint_id
-from common.tools.object_factory.Service import json_service_l2nm_planned, json_service_l3nm_planned
+from common.tools.object_factory.Service import json_service_l2nm_planned, json_service_l3nm_planned, json_service_qkd_planned
 from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
 from webui.service.service.forms import (
     AddServiceForm_1, AddServiceForm_ACL_L2, AddServiceForm_ACL_IPV4, AddServiceForm_ACL_IPV6,
-    AddServiceForm_L2VPN, AddServiceForm_L3VPN
+    AddServiceForm_L2VPN, AddServiceForm_L3VPN, AddServiceForm_QKD
 )
 
 LOGGER = logging.getLogger(__name__)
@@ -329,10 +329,83 @@ def add_configure():
     form_1 = AddServiceForm_1()
     if form_1.validate_on_submit():
         service_type = str(form_1.service_type.data)
-        if service_type in {'ACL_L2', 'ACL_IPV4', 'ACL_IPV6', 'L2VPN', 'L3VPN'}:
+        if service_type in {'ACL_L2', 'ACL_IPV4', 'ACL_IPV6', 'L2VPN', 'L3VPN', 'QKD'}:
             return redirect(url_for('service.add_configure_{:s}'.format(service_type)))
     return render_template('service/add.html', form_1=form_1, submit_text='Continue to configuraton')
 
+@service.route('add/configure/QKD', methods=['GET', 'POST'])
+def add_configure_QKD():
+    form_qkd = AddServiceForm_QKD()
+    service_obj = Service()
+    devices = []
+
+    context_uuid, topology_uuid = get_context_and_topology_uuids()
+    if context_uuid and topology_uuid:
+        context_client.connect()
+        grpc_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid, rw_copy=False)
+        if grpc_topology:
+            topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids}
+            grpc_devices = context_client.ListDevices(Empty())
+            devices = [
+                device for device in grpc_devices.devices
+                if device.device_id.device_uuid.uuid in topo_device_uuids and DeviceDriverEnum.DEVICEDRIVER_QKD in device.device_drivers
+            ]
+            choices = get_device_choices(devices)
+            add_device_choices_to_form(choices, form_qkd.service_device_1)
+            add_device_choices_to_form(choices, form_qkd.service_device_2)
+        else:
+            flash('Context({:s})/Topology({:s}) not found'.format(str(context_uuid), str(topology_uuid)), 'danger')
+    else:
+        flash('Missing context or topology UUID', 'danger')
+
+    if form_qkd.validate_on_submit():
+        try:
+            selected_device_1, selected_device_2, selected_endpoint_1, selected_endpoint_2 = validate_selected_devices_and_endpoints(form_qkd, devices)
+        except Exception as e:
+            flash(str(e), 'danger')
+            current_app.logger.exception(e)
+            return render_template('service/configure_QKD.html', form_qkd=form_qkd, submit_text='Add New Service')
+
+        service_uuid, service_type, endpoint_ids = set_service_parameters(service_obj, form_qkd, selected_device_1, selected_device_2, selected_endpoint_1, selected_endpoint_2)
+        constraints = add_constraints(form_qkd)
+        params_device_1_with_data = get_device_params(form_qkd, 1, service_type)
+        params_device_2_with_data = get_device_params(form_qkd, 2, service_type)
+        LOGGER.debug('params_device_1_with_data={:s}'.format(str(params_device_1_with_data)))
+        LOGGER.debug('params_device_2_with_data={:s}'.format(str(params_device_2_with_data)))
+        params_settings = {}
+        config_rules = [
+            json_config_rule_set(
+                '/settings', params_settings
+            ),
+            json_config_rule_set(
+                '/device[{:s}]/endpoint[{:s}]/settings'.format(str(selected_device_1.name), str(selected_endpoint_1)), params_device_1_with_data
+            ),
+            json_config_rule_set(
+                '/device[{:s}]/endpoint[{:s}]/settings'.format(str(selected_device_2.name), str(selected_endpoint_2)), params_device_2_with_data
+            ),
+        ]
+
+        service_client.connect()
+        context_client.connect()
+        device_client.connect()
+        descriptor_json = json_service_qkd_planned(service_uuid=service_uuid, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules, context_uuid=context_uuid)
+        descriptor_json = {"services": [descriptor_json]}
+        try:
+            process_descriptors(descriptor_json)
+            flash('Service "{:s}" added successfully!'.format(service_obj.service_id.service_uuid.uuid), 'success')
+            return redirect(url_for('service.home', service_uuid=service_obj.service_id.service_uuid.uuid))
+        except Exception as e:
+            flash('Problem adding service: {:s}'.format(str(e)), 'danger')
+            current_app.logger.exception(e)
+        finally:
+            context_client.close()
+            device_client.close()
+            service_client.close()
+
+
+    return render_template('service/configure_QKD.html', form_qkd=form_qkd, submit_text='Add New Service')
+
+
 @service.route('add/configure/ACL_L2', methods=['GET', 'POST'])
 def add_configure_ACL_L2():
     form_acl = AddServiceForm_ACL_L2()
@@ -666,6 +739,9 @@ def get_device_params(form, device_num, form_type):
             'ni_description': str(getattr(form, 'NI_description').data),
             'subif_description': str(getattr(form, f'Device_{device_num}_IF_description').data),
         }
+    elif form_type == 6:
+        # QKD services do not require per-device parameters yet
+        device_params = {}
     else:
         raise ValueError(f'Unsupported form type: {form_type}')
 
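For readability, this is the shape of the payload that `add_configure_QKD()` hands to `process_descriptors()`, reassembled as a standalone sketch. All values are illustrative placeholders; the `ConfigRule` import path is the usual TFS location for `json_config_rule_set`.

```python
from common.tools.object_factory.ConfigRule import json_config_rule_set
from common.tools.object_factory.Service import json_service_qkd_planned

# Illustrative device/endpoint names; the route derives them from the form.
config_rules = [
    json_config_rule_set('/settings', {}),
    json_config_rule_set('/device[qkd1]/endpoint[10.10.10.1:1001]/settings', {}),
    json_config_rule_set('/device[qkd2]/endpoint[10.10.10.2:2001]/settings', {}),
]
descriptor_json = {'services': [
    json_service_qkd_planned(
        service_uuid='example-qkd-service',  # set_service_parameters() provides this
        endpoint_ids=[],                     # device/endpoint pairs from the form
        constraints=[],                      # output of add_constraints(form_qkd)
        config_rules=config_rules,
        context_uuid='admin',                # assumed default context name
    )
]}
```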
diff --git a/src/webui/service/static/topology_icons/emu-qkd-node.png b/src/webui/service/static/topology_icons/emu-qkd-node.png
new file mode 100644
index 0000000000000000000000000000000000000000..d4dc1abaf42a56ff07d1f4a2c5d250b56486584d
Binary files /dev/null and b/src/webui/service/static/topology_icons/emu-qkd-node.png differ
diff --git a/src/webui/service/static/topology_icons/qkd-node.png b/src/webui/service/static/topology_icons/qkd-node.png
new file mode 100644
index 0000000000000000000000000000000000000000..79f40d2a600bd7f9e55d0360a132800c09a8ac85
Binary files /dev/null and b/src/webui/service/static/topology_icons/qkd-node.png differ
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index c154346204a4ad59eec54a7e9ae3956a7f3db655..432f1a095be1a682624a45decf2355310e58238b 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -55,6 +55,7 @@
                   <a class="nav-link" href="{{ url_for('main.home') }}">Home</a>
                   {% endif %}
                 </li>
+
                 <li class="nav-item">
                   {% if '/device/' in request.path %}
                   <a class="nav-link active" aria-current="page" href="{{ url_for('device.home') }}">Device</a>
@@ -62,6 +63,7 @@
                   <a class="nav-link" href="{{ url_for('device.home') }}">Device</a>
                   {% endif %}
                 </li>
+
                 <li class="nav-item">
                   {% if '/link/' in request.path %}
                   <a class="nav-link active" aria-current="page" href="{{ url_for('link.home') }}">Link</a>
@@ -69,6 +71,7 @@
                   <a class="nav-link" href="{{ url_for('link.home') }}">Link</a>
                   {% endif %}
                 </li>
+
                 <li class="nav-item">
                   {% if '/service/' in request.path %}
                   <a class="nav-link active" aria-current="page" href="{{ url_for('service.home') }}">Service</a>
@@ -76,40 +79,63 @@
                   <a class="nav-link" href="{{ url_for('service.home') }}">Service</a>
                   {% endif %}
                 </li>
-                <li class="nav-item">
-                  {% if '/slice/' in request.path %}
-                  <a class="nav-link active" aria-current="page" href="{{ url_for('slice.home') }}">Slice</a>
-                  {% else %}
-                  <a class="nav-link" href="{{ url_for('slice.home') }}">Slice</a>
-                  {% endif %}
-                </li>
-                <li class="nav-item">
-                  {% if '/policy_rule/' in request.path %}
-                  <a class="nav-link active" aria-current="page" href="{{ url_for('policy_rule.home') }}">Policy Rules</a>
-                  {% else %}
-                  <a class="nav-link" href="{{ url_for('policy_rule.home') }}">Policy Rules</a>
-                  {% endif %}
-                </li>
+
+                {% if is_deployed_slice() %}
+                  <li class="nav-item">
+                    {% if '/slice/' in request.path %}
+                    <a class="nav-link active" aria-current="page" href="{{ url_for('slice.home') }}">Slice</a>
+                    {% else %}
+                    <a class="nav-link" href="{{ url_for('slice.home') }}">Slice</a>
+                    {% endif %}
+                  </li>
+                {% endif %}
+
+                {% if is_deployed_policy() %}
+                  <li class="nav-item">
+                    {% if '/policy_rule/' in request.path %}
+                    <a class="nav-link active" aria-current="page" href="{{ url_for('policy_rule.home') }}">Policy Rules</a>
+                    {% else %}
+                    <a class="nav-link" href="{{ url_for('policy_rule.home') }}">Policy Rules</a>
+                    {% endif %}
+                  </li>
+                {% endif %}
+
+                {% if is_deployed_qkd_app() %}
+                  <li class="nav-item">
+                    {% if '/qkd_app/' in request.path %}
+                    <a class="nav-link active" aria-current="page" href="{{ url_for('qkd_app.home') }}">QKD Apps</a>
+                    {% else %}
+                    <a class="nav-link" href="{{ url_for('qkd_app.home') }}">QKD Apps</a>
+                    {% endif %}
+                  </li>
+                {% endif %}
+
+                {% if is_deployed_bgpls() %}
+                  <li class="nav-item">
+                    {% if '/bgpls/' in request.path %}
+                    <a class="nav-link active" aria-current="page" href="{{ url_for('bgpls.home') }}">BGPLS</a>
+                    {% else %}
+                    <a class="nav-link" href="{{ url_for('bgpls.home') }}">BGPLS</a>
+                    {% endif %}
+                  </li>
+                {% endif %}
+
+                {% if is_deployed_load_gen() %}
+                  <li class="nav-item">
+                    {% if '/load-gen/' in request.path %}
+                    <a class="nav-link active" aria-current="page" href="{{ url_for('load_gen.home') }}">Load Generator</a>
+                    {% else %}
+                    <a class="nav-link" href="{{ url_for('load_gen.home') }}">Load Generator</a>
+                    {% endif %}
+                  </li>
+                {% endif %}
+
                 <li class="nav-item">
                   <a class="nav-link" href="/grafana" id="grafana_link" target="grafana">Grafana</a>
                 </li>
                 <li class="nav-item">
                   <a class="nav-link" href="{{ url_for('main.debug') }}">Debug</a>
                 </li>
-                <li class="nav-item">
-                  {% if '/load-gen/' in request.path %}
-                  <a class="nav-link active" aria-current="page" href="{{ url_for('load_gen.home') }}">Load Generator</a>
-                  {% else %}
-                  <a class="nav-link" href="{{ url_for('load_gen.home') }}">Load Generator</a>
-                  {% endif %}
-                </li>
-                <li class="nav-item">
-                  {% if '/bgpls/' in request.path %}
-                  <a class="nav-link active" aria-current="page" href="{{ url_for('bgpls.home') }}">BGPLS</a>
-                  {% else %}
-                  <a class="nav-link" href="{{ url_for('bgpls.home') }}">BGPLS</a>
-                  {% endif %}
-                </li>
   
                 <!-- <li class="nav-item">
                   <a class="nav-link" href="#">Context</a>
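The navigation bar now hides the Slice, Policy Rules, QKD Apps, BGPLS, and Load Generator entries behind `is_deployed_*()` helpers. Their registration lives outside this diff; a minimal sketch of how such template globals could be wired into the WebUI Flask app, assuming a `TFS_COMPONENTS`-style environment variable and hypothetical component names:

```python
# Assumed wiring, for illustration only; the actual helpers are defined
# elsewhere in the WebUI and are not part of this diff.
import os
from flask import Flask

def register_deployment_globals(app: Flask) -> None:
    deployed = set(os.environ.get('TFS_COMPONENTS', '').split())
    app.jinja_env.globals.update(
        is_deployed_slice=lambda: 'slice' in deployed,
        is_deployed_policy=lambda: 'policy' in deployed,
        is_deployed_qkd_app=lambda: 'qkd_app' in deployed,
        is_deployed_bgpls=lambda: 'bgpls_speaker' in deployed,
        is_deployed_load_gen=lambda: 'load_generator' in deployed,
    )
```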
diff --git a/src/webui/service/templates/device/add.html b/src/webui/service/templates/device/add.html
index 3bea6ae719a75c91835ceb35f50b5bbeba2c7940..e11c37688c09b96849c63a5d51cd7e546468d558 100644
--- a/src/webui/service/templates/device/add.html
+++ b/src/webui/service/templates/device/add.html
@@ -95,6 +95,7 @@
                 <br />
                 {{ form.device_drivers_optical_tfs }} {{ form.device_drivers_optical_tfs.label(class="col-sm-3 col-form-label") }}
                 {{ form.device_drivers_ietf_actn }} {{ form.device_drivers_ietf_actn.label(class="col-sm-3 col-form-label") }}
+                {{ form.device_drivers_qkd }} {{ form.device_drivers_qkd.label(class="col-sm-3 col-form-label") }}
                 {% endif %}
             </div>
         </div>
diff --git a/src/webui/service/templates/qkd_app/home.html b/src/webui/service/templates/qkd_app/home.html
new file mode 100644
index 0000000000000000000000000000000000000000..9573013f41410a5d8560e71c174ce6a85237089f
--- /dev/null
+++ b/src/webui/service/templates/qkd_app/home.html
@@ -0,0 +1,92 @@
+<!--
+ Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+{% extends 'base.html' %}
+
+{% block content %}
+    <h1>Apps</h1>
+
+    <div class="row">
+        <div class="col">
+            {{ apps | length }} apps found in context <i>{{ session['context_uuid'] }}</i>
+        </div>
+    </div>
+
+    <table class="table table-striped table-hover">
+        <thead>
+          <tr>
+            <th scope="col">UUID</th>
+            <th scope="col">Status</th>
+            <th scope="col">Type</th>
+            <th scope="col">Device 1</th>
+            <th scope="col">Device 2</th>
+            <th scope="col"></th>
+          </tr>
+        </thead>
+        <tbody>
+            {% if apps %}
+                {% for app in apps %}
+                <tr>
+                    <td>
+                        {{ app.app_id.app_uuid.uuid }}
+                    </td>
+                    <td>
+                        {{ ase.Name(app.app_status).replace('QKDAPPSTATUS_', '') }}
+                    </td>
+                    <td>
+                        {{ ate.Name(app.app_type).replace('QKDAPPTYPES_', '').replace('CLIENT', 'EXTERNAL') }}
+                    </td>
+                    <td>
+                        <a href="{{ url_for('device.detail', device_uuid=app.local_device_id.device_uuid.uuid) }}">
+                            {{ device_names.get(app.local_device_id.device_uuid.uuid, app.local_device_id.device_uuid.uuid) }}
+                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                            </svg>
+                        </a>
+                    </td>
+                    <td>
+                        {% if app.remote_device_id.device_uuid.uuid %}
+                            <a href="{{ url_for('device.detail', device_uuid=app.remote_device_id.device_uuid.uuid) }}">
+                                {{ device_names.get(app.remote_device_id.device_uuid.uuid, app.remote_device_id.device_uuid.uuid) }}
+                                <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                    <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                    <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                                </svg>
+                            </a>
+                        {% endif %}
+                    </td>
+                    <td>
+                        <!--
+                            <a href="{{ url_for('qkd_app.detail', app_uuid=app.app_id.app_uuid.uuid) }}">
+                                <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
+                                    <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
+                                    <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
+                                </svg>
+                            </a>
+                        -->
+                    </td>
+                </tr>
+                {% endfor %}
+            {% else %}
+                <tr>
+                    <td colspan="7">No apps found</td>
+                </tr>
+            {% endif %}
+        </tbody>
+    </table>
+
+{% endblock %}
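The Status and Type columns above strip the protobuf enum prefixes for display (and relabel CLIENT as EXTERNAL). The same mapping, as a small Python sketch using the enums this patch imports in `qkd_app/routes.py`:

```python
from common.proto.qkd_app_pb2 import QKDAppStatusEnum, QKDAppTypesEnum

def status_label(app_status: int) -> str:
    # e.g. QKDAPPSTATUS_ON -> 'ON'
    return QKDAppStatusEnum.Name(app_status).replace('QKDAPPSTATUS_', '')

def type_label(app_type: int) -> str:
    # CLIENT apps are displayed as EXTERNAL in the WebUI
    return QKDAppTypesEnum.Name(app_type).replace('QKDAPPTYPES_', '').replace('CLIENT', 'EXTERNAL')
```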
diff --git a/src/webui/service/templates/service/configure_QKD.html b/src/webui/service/templates/service/configure_QKD.html
new file mode 100644
index 0000000000000000000000000000000000000000..a01f4519d7f4b09732fba6d24db034f143be9943
--- /dev/null
+++ b/src/webui/service/templates/service/configure_QKD.html
@@ -0,0 +1,186 @@
+<!--
+ Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) 
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+{% extends 'base.html' %}
+
+{% block content %}
+<h1>Add New Service [QKD]</h1>
+<form method="POST" action="{{ url_for('service.add_configure_QKD') }}">
+    <fieldset>
+        <div class="row mb-3">
+            {{ form_qkd.hidden_tag() }}
+        </div> 
+        <h3>Generic Service Parameters</h3>
+        <div class="row mb-3">
+            {{ form_qkd.service_name.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form_qkd.service_name.errors %}
+                {{ form_qkd.service_name(class="form-control is-invalid", placeholder="Mandatory") }}
+                <div class="invalid-feedback">
+                    {% for error in form_qkd.service_name.errors %}
+                    <span>{{ error }}</span>
+                    {% endfor %}
+                </div>
+                {% else %}
+                {{ form_qkd.service_name(class="form-control", placeholder="Mandatory") }}
+                {% endif %}
+            </div>
+        </div>
+        <div class="row mb-3">
+            {{ form_qkd.service_type.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form_qkd.service_type.errors %}
+                {{ form_qkd.service_type(class="form-control is-invalid", placeholder="Mandatory") }}
+                <div class="invalid-feedback">
+                    {% for error in form_qkd.service_type.errors %}
+                    <span>{{ error }}</span>
+                    {% endfor %}
+                </div>
+                {% else %}
+                {{ form_qkd.service_type(class="form-control", placeholder="Mandatory") }}
+                {% endif %}
+            </div>
+        </div>
+        <div class="row mb-3">
+            {{ form_qkd.service_device_1.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-4">
+                {% if form_qkd.service_device_1.errors %}
+                {{ form_qkd.service_device_1(class="form-control is-invalid", placeholder="Mandatory") }}
+                <div class="invalid-feedback">
+                    {% for error in form_qkd.service_device_1.errors %}
+                    <span>{{ error }}</span>
+                    {% endfor %}
+                </div>
+                {% else %}
+                {{ form_qkd.service_device_1(class="form-control", placeholder="Mandatory") }}
+                {% endif %}
+            </div>
+            {{ form_qkd.service_device_2.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-4">
+                {% if form_qkd.service_device_2.errors %}
+                {{ form_qkd.service_device_2(class="form-control is-invalid", placeholder="Mandatory") }}
+                <div class="invalid-feedback">
+                    {% for error in form_qkd.service_device_2.errors %}
+                    <span>{{ error }}</span>
+                    {% endfor %}
+                </div>
+                {% else %}
+                {{ form_qkd.service_device_2(class="form-control", placeholder="Mandatory") }}
+                {% endif %}
+            </div>
+        </div>
+        <div class="row mb-3">
+            {{ form_qkd.service_endpoint_1.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-4">
+                {% if form_qkd.service_endpoint_1.errors %}
+                {{ form_qkd.service_endpoint_1(class="form-control is-invalid", placeholder="Mandatory") }}
+                <div class="invalid-feedback">
+                    {% for error in form_qkd.service_endpoint_1.errors %}
+                    <span>{{ error }}</span>
+                    {% endfor %}
+                </div>
+                {% else %}
+                {{ form_qkd.service_endpoint_1(class="form-control", placeholder="Mandatory") }}
+                {% endif %}
+            </div>
+            {{ form_qkd.service_endpoint_2.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-4">
+                {% if form_qkd.service_endpoint_2.errors %}
+                {{ form_qkd.service_endpoint_2(class="form-control is-invalid", placeholder="Mandatory") }}
+                <div class="invalid-feedback">
+                    {% for error in form_qkd.service_endpoint_2.errors %}
+                    <span>{{ error }}</span>
+                    {% endfor %}
+                </div>
+                {% else %}
+                {{ form_qkd.service_endpoint_2(class="form-control", placeholder="Mandatory") }}
+                {% endif %}
+            </div>
+        </div>
+        <br/>
+        <h3>Generic Service Constraints</h3>
+        <div class="row mb-3">
+            {{ form_qkd.service_capacity.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form_qkd.service_capacity.errors %}
+                {{ form_qkd.service_capacity(class="form-control is-invalid") }}
+                <div class="invalid-feedback">
+                    {% for error in form_qkd.service_capacity.errors %}
+                    <span>{{ error }}</span>
+                    {% endfor %}
+                </div>
+                {% else %}
+                {{ form_qkd.service_capacity(class="form-control") }}
+                {% endif %}
+            </div>
+        </div>
+        <div class="row mb-3">
+            {{ form_qkd.service_latency.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form_qkd.service_latency.errors %}
+                {{ form_qkd.service_latency(class="form-control is-invalid") }}
+                <div class="invalid-feedback">
+                    {% for error in form_qkd.service_latency.errors %}
+                    <span>{{ error }}</span>
+                    {% endfor %}
+                </div>
+                {% else %}
+                {{ form_qkd.service_latency(class="form-control") }}
+                {% endif %}
+            </div>
+        </div>
+        <div class="row mb-3">
+            {{ form_qkd.service_availability.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form_qkd.service_availability.errors %}
+                {{ form_qkd.service_availability(class="form-control is-invalid") }}
+                <div class="invalid-feedback">
+                    {% for error in form_qkd.service_availability.errors %}
+                    <span>{{ error }}</span>
+                    {% endfor %}
+                </div>
+                {% else %}
+                {{ form_qkd.service_availability(class="form-control") }}
+                {% endif %}
+            </div>
+        </div>
+        <div class="row mb-3">
+            {{ form_qkd.service_isolation.label(class="col-sm-2 col-form-label") }}
+            <div class="col-sm-10">
+                {% if form_qkd.service_isolation.errors %}
+                {{ form_qkd.service_isolation(class="form-control is-invalid") }}
+                <div class="invalid-feedback">
+                    {% for error in form_qkd.service_isolation.errors %}
+                    <span>{{ error }}</span>
+                    {% endfor %}
+                </div>
+                {% else %}
+                {{ form_qkd.service_isolation(class="form-control") }}
+                {% endif %}
+            </div>
+        </div>
+        <button type="submit" class="btn btn-primary">
+            <i class="bi bi-plus-circle-fill"></i>
+            {{ submit_text }}
+        </button>
+        <button type="button" class="btn btn-block btn-secondary" onclick="javascript: history.back()">
+            <i class="bi bi-box-arrow-in-left"></i>
+            Cancel
+        </button>
+    </fieldset>
+</form>
+{% endblock %}
+