diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index e2d653e0360b694891adc966d6d0b1124ed72ac4..0c5ff9325944d1a5a54d941d32d6a45782257970 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -45,6 +45,9 @@ include:
   #- local: '/src/dlt/.gitlab-ci.yml'
   - local: '/src/load_generator/.gitlab-ci.yml'
   - local: '/src/bgpls_speaker/.gitlab-ci.yml'
+  - local: '/src/kpi_manager/.gitlab-ci.yml'
+  - local: '/src/kpi_value_api/.gitlab-ci.yml'
+  - local: '/src/kpi_value_writer/.gitlab-ci.yml'
 
   # This should be last one: end-to-end integration tests
   - local: '/src/tests/.gitlab-ci.yml'
diff --git a/deploy/all.sh b/deploy/all.sh
index 2b9e219ea60fd96b386e82cbc505a1059587e1e1..f93cd92ac5e3189b0dc8fa71d74a586e929aaecc 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -29,6 +29,46 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 # By default, only basic components are deployed
 export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui load_generator"}
 
+# Uncomment to activate Monitoring (old)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Monitoring Framework (new)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
+
+# Uncomment to activate BGP-LS Speaker
+#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
+
+# Uncomment to activate Optical Controller
+#   To manage optical connections, "service" requires "opticalcontroller" to be deployed
+#   before it. To that end, we "hack" the TFS_COMPONENTS environment variable, prepending
+#   "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it.
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+#    BEFORE="${TFS_COMPONENTS% service*}"
+#    AFTER="${TFS_COMPONENTS#* service}"
+#    export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}"
+#fi
+
+# Uncomment to activate ZTP
+#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
+
+# Uncomment to activate Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
+
+# Uncomment to activate Forecaster
+#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster"
+
+# Uncomment to activate E2E Orchestrator
+#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator"
+
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
 
@@ -67,8 +107,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
 export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 
 # If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
-# "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
-# checking/deploying CockroachDB.
 # - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for
 #   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
 # - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster
@@ -80,7 +118,7 @@ export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
 
 # If not already set, disable flag for dropping database, if it exists.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION!
-# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
+# If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_DATABASE will be dropped while
 # checking/deploying CockroachDB.
 export CRDB_DROP_DATABASE_IF_EXISTS=${CRDB_DROP_DATABASE_IF_EXISTS:-""}
 
@@ -102,6 +140,14 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
 
+# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
+# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
+#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
+# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
+#   with 3 replicas (set by default) will be deployed. It is convenient for production and
+#   provides scalability features.
+export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
+
 # If not already set, disable flag for re-deploying NATS from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION!
 # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
@@ -137,7 +183,7 @@ export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 # If not already set, disable flag for dropping tables if they exist.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
 # If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables
-# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while 
+# QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped while
 # checking/deploying QuestDB.
 export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
 
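For reference, a minimal sketch of how the new toggles in deploy/all.sh combine, assuming the default namespaces and image tag; the component list below is the default one plus the new Monitoring Framework components:

```bash
# Hypothetical session: start from the default component set, add the new
# Monitoring Framework components, pick a NATS mode, and deploy everything.
export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
export NATS_DEPLOY_MODE="single"   # or "cluster"; see the NATS_DEPLOY_MODE comment above
./deploy/all.sh
```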
diff --git a/deploy/crdb.sh b/deploy/crdb.sh
index c979ad4f2c18861c6a93b6b04e5d8e3e71aae41e..3e80b6350e66ec30a725c45acb7cf954ac3009c8 100755
--- a/deploy/crdb.sh
+++ b/deploy/crdb.sh
@@ -37,8 +37,6 @@ export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
 export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 
 # If not already set, set CockroachDB installation mode. Accepted values are: 'single' and 'cluster'.
-# "YES", the database pointed by variable CRDB_NAMESPACE will be dropped while
-# checking/deploying CockroachDB.
 # - If CRDB_DEPLOY_MODE is "single", CockroachDB is deployed in single node mode. It is convenient for
 #   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
 # - If CRDB_DEPLOY_MODE is "cluster", CockroachDB is deployed in cluster mode, and an entire CockroachDB cluster
@@ -48,7 +46,7 @@ export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
 #   Ref: https://www.cockroachlabs.com/docs/stable/recommended-production-settings.html
 export CRDB_DEPLOY_MODE=${CRDB_DEPLOY_MODE:-"single"}
 
-# If not already set, disable flag for dropping database if exists.
+# If not already set, disable flag for dropping database, if it exists.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE DATABASE INFORMATION!
 # If CRDB_DROP_DATABASE_IF_EXISTS is "YES", the database pointed by variable CRDB_DATABASE will be dropped while
 # checking/deploying CockroachDB.
@@ -79,7 +77,7 @@ function crdb_deploy_single() {
     kubectl create namespace ${CRDB_NAMESPACE}
     echo
 
-    echo "CockroachDB (single-node)"
+    echo "CockroachDB (single-mode)"
     echo ">>> Checking if CockroachDB is deployed..."
     if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
         echo ">>> CockroachDB is present; skipping step."
@@ -139,7 +137,7 @@ function crdb_deploy_single() {
 }
 
 function crdb_undeploy_single() {
-    echo "CockroachDB"
+    echo "CockroachDB (single-mode)"
     echo ">>> Checking if CockroachDB is deployed..."
     if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
         echo ">>> Undeploy CockroachDB"
@@ -223,7 +221,7 @@ function crdb_deploy_cluster() {
     kubectl create namespace ${CRDB_NAMESPACE}
     echo
 
-    echo "CockroachDB"
+    echo "CockroachDB (cluster-mode)"
     echo ">>> Checking if CockroachDB is deployed..."
     if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
         echo ">>> CockroachDB is present; skipping step."
@@ -319,7 +317,7 @@ function crdb_undeploy_cluster() {
     fi
     echo
 
-    echo "CockroachDB"
+    echo "CockroachDB (cluster-mode)"
     echo ">>> Checking if CockroachDB is deployed..."
     if kubectl get --namespace ${CRDB_NAMESPACE} statefulset/cockroachdb &> /dev/null; then
         echo ">>> Undeploy CockroachDB"
diff --git a/deploy/kafka.sh b/deploy/kafka.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4a91bfc9e657d1b8a6a548b9c0a81a2f8a0b45e0
--- /dev/null
+++ b/deploy/kafka.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+# If not already set, set the namespace where Apache Kafka will be deployed.
+export KFK_NAMESPACE=${KFK_NAMESPACE:-"kafka"}
+
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+TMP_FOLDER="./tmp"
+KFK_MANIFESTS_PATH="manifests/kafka"
+KFK_ZOOKEEPER_MANIFEST="01-zookeeper.yaml"
+KFK_MANIFEST="02-kafka.yaml"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${KFK_NAMESPACE}/manifests"
+mkdir -p ${TMP_MANIFESTS_FOLDER}
+
+# copy zookeeper and kafka manifest files to temporary manifest location
+cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
+cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
+
+echo "Apache Kafka Namespace"
+echo ">>> Delete Apache Kafka Namespace"
+kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
+
+echo ">>> Create Apache Kafka Namespace"
+kubectl create namespace ${KFK_NAMESPACE}
+
+echo ">>> Deploying Apache Kafka Zookeeper"
+# Kafka zookeeper service should be deployed before the kafka service
+kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}"
+
+KFK_ZOOKEEPER_SERVICE="zookeeper-service"    # this value may be replaced with a command that extracts the service name automatically
+KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}')
+
+# Kafka service should be deployed after the zookeeper service
+sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
+
+echo ">>> Deploying Apache Kafka Broker"
+kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST"
+
+echo ">>> Verifying Apache Kafka deployment"
+sleep 10
+KFK_PODS_STATUS=$(kubectl --namespace ${KFK_NAMESPACE} get pods)
+if echo "$KFK_PODS_STATUS" | grep -qEv 'STATUS|Running'; then
+    echo -e "Deployment Error:\n${KFK_PODS_STATUS}"
+else
+    echo "$KFK_PODS_STATUS"
+fi
\ No newline at end of file
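A quick way to exercise the new script and its verification step, assuming a cluster with `kubectl` already configured and the namespace/service names defined above:

```bash
# Deploy Zookeeper + Kafka into the default "kafka" namespace and inspect the result.
KFK_NAMESPACE="kafka" ./deploy/kafka.sh
kubectl --namespace kafka get pods     # both pods should reach STATUS=Running
kubectl --namespace kafka get services zookeeper-service kafka-service
```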
diff --git a/deploy/nats.sh b/deploy/nats.sh
index 366270a6915a1eef969846446ecc9152c3fa9531..e9cef883ee7b909255d44551919771ebc49f524b 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -27,6 +27,14 @@ export NATS_EXT_PORT_CLIENT=${NATS_EXT_PORT_CLIENT:-"4222"}
 # If not already set, set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP=${NATS_EXT_PORT_HTTP:-"8222"}
 
+# If not already set, set NATS installation mode. Accepted values are: 'single' and 'cluster'.
+# - If NATS_DEPLOY_MODE is "single", NATS is deployed in single node mode. It is convenient for
+#   development and testing purposes and should fit in a VM. IT SHOULD NOT BE USED IN PRODUCTION ENVIRONMENTS.
+# - If NATS_DEPLOY_MODE is "cluster", NATS is deployed in cluster mode, and an entire NATS cluster
+#   with 3 replicas (set by default) will be deployed. It is convenient for production and
+#   provides scalability features.
+export NATS_DEPLOY_MODE=${NATS_DEPLOY_MODE:-"single"}
+
 # If not already set, disable flag for re-deploying NATS from scratch.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE MESSAGE BROKER INFORMATION!
 # If NATS_REDEPLOY is "YES", the message broker will be dropped while checking/deploying NATS.
@@ -37,6 +45,14 @@ export NATS_REDEPLOY=${NATS_REDEPLOY:-""}
 # Automated steps start here
 ########################################################################################################################
 
+# Constants
+TMP_FOLDER="./tmp"
+NATS_MANIFESTS_PATH="manifests/nats"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="${TMP_FOLDER}/${NATS_NAMESPACE}/manifests"
+mkdir -p $TMP_MANIFESTS_FOLDER
+
 function nats_deploy_single() {
     echo "NATS Namespace"
     echo ">>> Create NATS Namespace (if missing)"
@@ -47,18 +63,86 @@ function nats_deploy_single() {
     helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
     echo
 
+    echo "Install NATS (single-mode)"
+    echo ">>> Checking if NATS is deployed..."
+    if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
+        echo ">>> NATS is present; skipping step."
+    else
+        echo ">>> Deploy NATS"
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine --set config.cluster.enabled=true --set config.cluster.tls.enabled=true
+
+
+        echo ">>> Waiting for NATS statefulset to be created..."
+        while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+
+        # Wait for statefulset condition "Available=True" does not work
+        # Wait for statefulset condition "jsonpath='{.status.readyReplicas}'=3" throws error:
+        #   "error: readyReplicas is not found"
+        # Workaround: Check the pods are ready
+        #echo ">>> NATS statefulset created. Waiting for readiness condition..."
+        #kubectl wait --namespace  ${NATS_NAMESPACE} --for=condition=Available=True --timeout=300s statefulset/nats
+        #kubectl wait --namespace ${NATS_NAMESPACE} --for=jsonpath='{.status.readyReplicas}'=3 --timeout=300s \
+        #    statefulset/nats
+        echo ">>> NATS statefulset created. Waiting for NATS pods to be created..."
+        while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-0 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0
+    fi
+    echo
+
+    echo "NATS Port Mapping"
+    echo ">>> Expose NATS Client port (4222->${NATS_EXT_PORT_CLIENT})"
+    NATS_PORT_CLIENT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
+    PATCH='{"data": {"'${NATS_EXT_PORT_CLIENT}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_CLIENT}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${NATS_EXT_PORT_CLIENT}', "hostPort": '${NATS_EXT_PORT_CLIENT}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+
+    echo ">>> Expose NATS HTTP Mgmt GUI port (8222->${NATS_EXT_PORT_HTTP})"
+    NATS_PORT_HTTP=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="monitor")].port}')
+    PATCH='{"data": {"'${NATS_EXT_PORT_HTTP}'": "'${NATS_NAMESPACE}'/'${NATS_NAMESPACE}':'${NATS_PORT_HTTP}'"}}'
+    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+    PORT_MAP='{"containerPort": '${NATS_EXT_PORT_HTTP}', "hostPort": '${NATS_EXT_PORT_HTTP}'}'
+    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
+    echo
+}
+
+
+function nats_deploy_cluster() {
+    echo "NATS Namespace"
+    echo ">>> Create NATS Namespace (if missing)"
+    kubectl create namespace ${NATS_NAMESPACE}
+    echo
+
+    echo "Add NATS Helm Chart"
+    helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
+    echo
+
     echo "Upgrade NATS Helm Chart"
     helm3 repo update nats
     echo
 
-    echo "Install NATS (single-node)"
+    echo "Install NATS (cluster-mode)"
     echo ">>> Checking if NATS is deployed..."
     if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
         echo ">>> NATS is present; skipping step."
     else
         echo ">>> Deploy NATS"
-        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
-
+        cp "${NATS_MANIFESTS_PATH}/cluster.yaml" "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml"
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} -f "${TMP_MANIFESTS_FOLDER}/nats_cluster.yaml"
+
         echo ">>> Waiting NATS statefulset to be created..."
         while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
             printf "%c" "."
@@ -78,7 +162,17 @@ function nats_deploy_single() {
             printf "%c" "."
             sleep 1
         done
+        while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-1 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
+        while ! kubectl get --namespace ${NATS_NAMESPACE} pod/${NATS_NAMESPACE}-2 &> /dev/null; do
+            printf "%c" "."
+            sleep 1
+        done
         kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-0
+        kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-1
+        kubectl wait --namespace ${NATS_NAMESPACE} --for=condition=Ready --timeout=300s pod/${NATS_NAMESPACE}-2
     fi
     echo
 
@@ -110,7 +204,7 @@ function nats_deploy_single() {
     echo
 }
 
-function nats_undeploy_single() {
+function nats_undeploy() {
     echo "NATS"
     echo ">>> Checking if NATS is deployed..."
     if kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; then
@@ -128,7 +222,13 @@ function nats_undeploy_single() {
 }
 
 if [ "$NATS_REDEPLOY" == "YES" ]; then
-    nats_undeploy_single
+    nats_undeploy
 fi
 
-nats_deploy_single
+if [ "$NATS_DEPLOY_MODE" == "single" ]; then
+    nats_deploy_single
+elif [ "$NATS_DEPLOY_MODE" == "cluster" ]; then
+    nats_deploy_cluster
+else
+    echo "Unsupported value: NATS_DEPLOY_MODE=$NATS_DEPLOY_MODE"
+fi
\ No newline at end of file
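A sketch of driving the new NATS_DEPLOY_MODE switch, assuming the default `nats` namespace; note that NATS_REDEPLOY=YES destroys the existing broker state:

```bash
# Hypothetical session: re-deploy NATS in cluster mode and check the 3 replicas.
export NATS_DEPLOY_MODE="cluster"
export NATS_REDEPLOY="YES"       # WARNING: drops the current message broker contents
./deploy/nats.sh
kubectl --namespace nats get pods    # expect nats-0, nats-1 and nats-2 in Ready state
```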
diff --git a/deploy/qdb.sh b/deploy/qdb.sh
index acbcfd4f96ccbd2b09d5d82f66a1bf801a710780..ebb75dce9ad3007145a5129df3a4037a9392e875 100755
--- a/deploy/qdb.sh
+++ b/deploy/qdb.sh
@@ -44,7 +44,7 @@ export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
 
 # If not already set, disable flag for dropping tables if they exist.
 # WARNING: ACTIVATING THIS FLAG IMPLIES LOOSING THE TABLE INFORMATION!
-# If QDB_DROP_TABLES_IF_EXIST is "YES", the table pointed by variables
+# If QDB_DROP_TABLES_IF_EXIST is "YES", the tables pointed by variables
 # QDB_TABLE_MONITORING_KPIS and QDB_TABLE_SLICE_GROUPS will be dropped
 # while checking/deploying QuestDB.
 export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index 2c152fd603346b4cc98a394badac49cc73152fab..26ed52f82d1fe117203f047e5281e26df287ef9b 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -154,10 +154,23 @@ printf "\n"
 
 echo "Create secret with CockroachDB data"
 CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+CRDB_DATABASE_CONTEXT=${CRDB_DATABASE}  # TODO: replace with a specific configurable environment variable
 kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
     --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
     --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
-    --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \
+    --from-literal=CRDB_DATABASE=${CRDB_DATABASE_CONTEXT} \
+    --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
+    --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
+    --from-literal=CRDB_SSLMODE=require
+printf "\n"
+
+echo "Create secret with CockroachDB data for KPI Management"
+CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt"  # TODO: replace with a specific configurable environment variable
+kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
+    --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
+    --from-literal=CRDB_DATABASE=${CRDB_DATABASE_KPI_MGMT} \
     --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
     --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
     --from-literal=CRDB_SSLMODE=require
@@ -234,6 +247,14 @@ if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then
     DOCKER_BUILD="docker buildx build"
 fi
 
+LINKERD_STATUS="$(microk8s status -a linkerd)"
+if [[ ${LINKERD_STATUS} =~ "enabled" ]]; then
+    echo "LinkerD installed: workloads will be injected"
+else
+    echo "LinkerD not installed"
+fi
+printf "\n"
+
 for COMPONENT in $TFS_COMPONENTS; do
     echo "Processing '$COMPONENT' component..."
 
@@ -314,8 +335,11 @@ for COMPONENT in $TFS_COMPONENTS; do
 
     echo "  Adapting '$COMPONENT' manifest file..."
     MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
-    # cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
-    cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST"
+    if [[ ${LINKERD_STATUS} =~ "enabled" ]]; then
+        cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST"
+    else
+        cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+    fi
 
     if [ "$COMPONENT" == "pathcomp" ]; then
         IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
@@ -351,7 +375,7 @@ for COMPONENT in $TFS_COMPONENTS; do
     echo "  Deploying '$COMPONENT' component to Kubernetes..."
     DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
     kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
-    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/g")
     #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
     #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
 
@@ -402,7 +426,7 @@ printf "\n"
 
 for COMPONENT in $TFS_COMPONENTS; do
     echo "Waiting for '$COMPONENT' component..."
-    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/g")
     kubectl wait --namespace $TFS_K8S_NAMESPACE \
         --for='condition=available' --timeout=90s deployment/${COMPONENT_OBJNAME}service
     WAIT_EXIT_CODE=$?
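To confirm the new KPI Management secret is wired as intended, a hedged check, assuming the default `tfs` value of TFS_K8S_NAMESPACE:

```bash
# Inspect both CockroachDB secrets created by deploy/tfs.sh.
kubectl --namespace tfs get secrets crdb-data crdb-kpi-data
# Decode the database name carried by the KPI secret; it should print "tfs_kpi_mgmt".
kubectl --namespace tfs get secret crdb-kpi-data \
    -o jsonpath='{.data.CRDB_DATABASE}' | base64 --decode; echo
```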
diff --git a/install_requirements.sh b/install_requirements.sh
index cbd378eca81af17386100fc0ceb3757912d0ebf5..54b660a521dadc08a344d2f79f2db15271131a21 100755
--- a/install_requirements.sh
+++ b/install_requirements.sh
@@ -22,6 +22,7 @@
 ALL_COMPONENTS="context device service nbi monitoring webui interdomain slice"
 ALL_COMPONENTS="${ALL_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector"
 ALL_COMPONENTS="${ALL_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector"
+ALL_COMPONENTS="${ALL_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
 TFS_COMPONENTS=${TFS_COMPONENTS:-$ALL_COMPONENTS}
 
 # Some components require libyang built from source code
diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml
index 4d9ef0f844b5ffb02753b6cc7a7be7d03928896c..bcb0c704948ecdbd8b271b68e685c481e669594b 100644
--- a/manifests/cockroachdb/cluster.yaml
+++ b/manifests/cockroachdb/cluster.yaml
@@ -39,8 +39,8 @@ spec:
       cpu: 8
       memory: 8Gi
   tlsEnabled: true
-# You can set either a version of the db or a specific image name
-# cockroachDBVersion: v22.2.8
+  # You can set either a version of the db or a specific image name
+  # cockroachDBVersion: v22.2.8
   image:
     name: cockroachdb/cockroach:v22.2.8
   # nodes refers to the number of crdb pods that are created
@@ -49,21 +49,16 @@ spec:
   additionalLabels:
     crdb: is-cool
   # affinity is a new API field that is behind a feature gate that is
-  # disabled by default.  To enable please see the operator.yaml file.
+  # disabled by default. To enable please see the operator.yaml file.
 
   # The affinity field will accept any podSpec affinity rule.
-  # affinity:
-  #   podAntiAffinity:
-  #      preferredDuringSchedulingIgnoredDuringExecution:
-  #      - weight: 100
-  #        podAffinityTerm:
-  #          labelSelector:
-  #            matchExpressions:
-  #            - key: app.kubernetes.io/instance
-  #              operator: In
-  #              values:
-  #              - cockroachdb
-  #          topologyKey: kubernetes.io/hostname
+  topologySpreadConstraints:
+  - maxSkew: 1
+    topologyKey: kubernetes.io/hostname
+    whenUnsatisfiable: ScheduleAnyway
+    labelSelector:
+      matchLabels:
+        app.kubernetes.io/instance: cockroachdb
 
   # nodeSelectors used to match against
   # nodeSelector:
diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml
index 59d515061c4c0f253523aab803653b3f33007461..d8e691308e4cc16af3f545d87244281ab0730696 100644
--- a/manifests/cockroachdb/operator.yaml
+++ b/manifests/cockroachdb/operator.yaml
@@ -381,6 +381,7 @@ spec:
     spec:
       containers:
       - args:
+        - -feature-gates=TolerationRules=true,AffinityRules=true,TopologySpreadRules=true
         - -zap-log-level
         - info
         env:
diff --git a/manifests/kafka/01-zookeeper.yaml b/manifests/kafka/01-zookeeper.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c0e87ae0c6f12ed56702220f9e15fbe90b3b9c31
--- /dev/null
+++ b/manifests/kafka/01-zookeeper.yaml
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: zookeeper-service
+  name: zookeeper-service
+  namespace: kafka
+spec:
+  type: NodePort
+  ports:
+    - name: zookeeper-port
+      port: 2181
+      nodePort: 30181
+      targetPort: 2181
+  selector:
+    app: zookeeper
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: zookeeper
+  name: zookeeper
+  namespace: kafka
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: zookeeper
+  template:
+    metadata:
+      labels:
+        app: zookeeper
+    spec:
+      containers:
+        - image: wurstmeister/zookeeper
+          imagePullPolicy: IfNotPresent
+          name: zookeeper
+          ports:
+            - containerPort: 2181
\ No newline at end of file
diff --git a/manifests/kafka/02-kafka.yaml b/manifests/kafka/02-kafka.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8e4562e6eabec34bf3b87912310479bd98022aeb
--- /dev/null
+++ b/manifests/kafka/02-kafka.yaml
@@ -0,0 +1,61 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: kafka-broker
+  name: kafka-service
+  namespace: kafka
+spec:
+  ports:
+  - port: 9092
+  selector:
+    app: kafka-broker
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: kafka-broker
+  name: kafka-broker
+  namespace: kafka
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kafka-broker
+  template:
+    metadata:
+      labels:
+        app: kafka-broker
+    spec:
+      hostname: kafka-broker
+      containers:
+      - env:
+        - name: KAFKA_BROKER_ID
+          value: "1"
+        - name: KAFKA_ZOOKEEPER_CONNECT
+          value: <ZOOKEEPER_INTERNAL_IP>:2181
+        - name: KAFKA_LISTENERS
+          value: PLAINTEXT://:9092
+        - name: KAFKA_ADVERTISED_LISTENERS
+          value: PLAINTEXT://localhost:9092
+        image: wurstmeister/kafka
+        imagePullPolicy: IfNotPresent
+        name: kafka-broker
+        ports:
+          - containerPort: 9092
\ No newline at end of file
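Because KAFKA_ADVERTISED_LISTENERS is set to PLAINTEXT://localhost:9092, the broker tells clients to reconnect to localhost; one hedged way to reach it from a workstation is a port-forward, so that the advertised address resolves locally:

```bash
# Forward local port 9092 to the broker pod; any Kafka client pointed at
# localhost:9092 then matches the advertised listener declared above.
kubectl --namespace kafka port-forward deployment/kafka-broker 9092:9092
```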
diff --git a/manifests/kpi_managerservice.yaml b/manifests/kpi_managerservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..984d783a9de7ed3c0c02e87d82ec673dc19c9508
--- /dev/null
+++ b/manifests/kpi_managerservice.yaml
@@ -0,0 +1,99 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kpi-managerservice
+spec:
+  selector:
+    matchLabels:
+      app: kpi-managerservice
+  #replicas: 1
+  template:
+    metadata:
+      annotations:
+        config.linkerd.io/skip-outbound-ports: "4222"
+      labels:
+        app: kpi-managerservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+        - name: server
+          image: labs.etsi.org:5050/tfs/controller/kpi_manager:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 30010
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          envFrom:
+            - secretRef:
+                name: crdb-kpi-data
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30010"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30010"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kpi-managerservice
+  labels:
+    app: kpi-managerservice
+spec:
+  type: ClusterIP
+  selector:
+    app: kpi-managerservice
+  ports:
+    - name: grpc
+      protocol: TCP
+      port: 30010
+      targetPort: 30010
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: kpi-managerservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: kpi-managerservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
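A couple of hedged smoke checks for this manifest, assuming TFS_K8S_NAMESPACE keeps its default `tfs` value:

```bash
# Probe the gRPC health endpoint on port 30010, reusing the binary from the probes above.
kubectl --namespace tfs exec deploy/kpi-managerservice -c server -- \
    /bin/grpc_health_probe -addr=:30010
# Watch the HorizontalPodAutoscaler track CPU utilization.
kubectl --namespace tfs get hpa kpi-managerservice-hpa
```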
diff --git a/manifests/kpi_value_apiservice.yaml b/manifests/kpi_value_apiservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..74eb90f675794f1b451b04af55e191edff58fae5
--- /dev/null
+++ b/manifests/kpi_value_apiservice.yaml
@@ -0,0 +1,96 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kpi-value-apiservice
+spec:
+  selector:
+    matchLabels:
+      app: kpi-value-apiservice
+  #replicas: 1
+  template:
+    metadata:
+      annotations:
+        config.linkerd.io/skip-outbound-ports: "4222"
+      labels:
+        app: kpi-value-apiservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+        - name: server
+          image: labs.etsi.org:5050/tfs/controller/kpi_value_api:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 30020
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30020"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30020"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kpi-value-apiservice
+  labels:
+    app: kpi-value-apiservice
+spec:
+  type: ClusterIP
+  selector:
+    app: kpi-value-apiservice
+  ports:
+    - name: grpc
+      protocol: TCP
+      port: 30020
+      targetPort: 30020
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: kpi-value-apiservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: kpi-value-apiservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/kpi_value_writerservice.yaml b/manifests/kpi_value_writerservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a8e44ec2a571f1290e30a08d1c896a6339cbe46
--- /dev/null
+++ b/manifests/kpi_value_writerservice.yaml
@@ -0,0 +1,96 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kpi-value-writerservice
+spec:
+  selector:
+    matchLabels:
+      app: kpi-value-writerservice
+  #replicas: 1
+  template:
+    metadata:
+      annotations:
+        config.linkerd.io/skip-outbound-ports: "4222"
+      labels:
+        app: kpi-value-writerservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+        - name: server
+          image: labs.etsi.org:5050/tfs/controller/kpi_value_writer:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 30030
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30030"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:30030"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kpi-value-writerservice
+  labels:
+    app: kpi-value-writerservice
+spec:
+  type: ClusterIP
+  selector:
+    app: kpi-value-writerservice
+  ports:
+    - name: grpc
+      protocol: TCP
+      port: 30030
+      targetPort: 30030
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 9192
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: kpi-value-writerservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: kpi-value-writerservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/manifests/nats/cluster.yaml b/manifests/nats/cluster.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..00dbef17fca74ca906d4f97ee6e8751c03ef493f
--- /dev/null
+++ b/manifests/nats/cluster.yaml
@@ -0,0 +1,47 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+container:
+  image:
+    tags: 2.9-alpine
+  env:
+    # different from k8s units, suffix must be B, KiB, MiB, GiB, or TiB
+    # should be ~90% of memory limit
+    GOMEMLIMIT: 400MiB
+  merge:
+    # recommended limit is at least 2 CPU cores and 8Gi Memory for production JetStream clusters
+    resources:
+      requests:
+        cpu: 1
+        memory: 500Mi
+      limits:
+        cpu: 1
+        memory: 1Gi
+
+config:
+  cluster:
+    enabled: true
+    replicas: 3
+  jetstream:
+    enabled: true
+    fileStore:
+      pvc:
+        size: 4Gi
+
+# Force one pod per node, if possible
+podTemplate:
+  topologySpreadConstraints:
+    kubernetes.io/hostname:
+      maxSkew: 1
+      whenUnsatisfiable: ScheduleAnyway
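These are Helm chart values rather than Kubernetes manifests; deploy/nats.sh feeds them to the nats/nats chart roughly as follows, assuming the default `nats` namespace:

```bash
# What nats_deploy_cluster() effectively runs after copying this file to the tmp folder.
helm3 repo add nats https://nats-io.github.io/k8s/helm/charts/
helm3 install nats nats/nats --namespace nats -f manifests/nats/cluster.yaml
```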
diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml
index 0892f0c9b790b936df5540ac5fe1aed0270b91a5..955d5726a9f8f79560327a8f595c1865f6d37d22 100644
--- a/manifests/nginx_ingress_http.yaml
+++ b/manifests/nginx_ingress_http.yaml
@@ -18,6 +18,11 @@ metadata:
   name: tfs-ingress
   annotations:
     nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/limit-rps: "50"
+    nginx.ingress.kubernetes.io/limit-connections: "50"
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "50"
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "50"
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "50"
 spec:
   rules:
     - http:
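A hedged way to observe the new rate limits, assuming the ingress is reachable on localhost port 80 and exposes the WebUI path; bursts beyond limit-rps should start returning HTTP 503 from nginx:

```bash
# Fire 100 rapid requests and tally the status codes; expect 200s plus 503s
# once the 50 requests-per-second limit kicks in.
for i in $(seq 1 100); do
    curl -s -o /dev/null -w '%{http_code}\n' http://localhost/webui/
done | sort | uniq -c
```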
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index a519aa4a2f8a1e81f1b7f2a1be1965ec0b8bb386..19317323f2a60293a33d740b28b3795627846642 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -117,3 +117,25 @@ spec:
     - name: grafana
       port: 3000
       targetPort: 3000
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: webuiservice-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: webuiservice
+  minReplicas: 1
+  maxReplicas: 20
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 50
+  #behavior:
+  #  scaleDown:
+  #    stabilizationWindowSeconds: 30
diff --git a/my_deploy.sh b/my_deploy.sh
index c0f2196c8db4b73c65fdbf31e9d6589f3cbf950b..bb1816bb921a342ce82eb40e20c9652ef3b64b75 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -22,9 +22,12 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
 export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator interdomain dlt"
 
-# Uncomment to activate Monitoring
+# Uncomment to activate Monitoring (old)
 #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
 
+# Uncomment to activate Monitoring Framework (new)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
+
 # Uncomment to activate BGP-LS Speaker
 #export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
 
@@ -69,7 +72,7 @@ export TFS_K8S_NAMESPACE="tfs"
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
 
 # Uncomment to monitor performance of components
-export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
 
 # Uncomment when deploying Optical CyberSecurity
 #export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
@@ -123,6 +126,10 @@ export NATS_EXT_PORT_CLIENT="4222"
 # Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
 export NATS_EXT_PORT_HTTP="8222"
 
+# Set NATS installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/nats.sh for additional details
+export NATS_DEPLOY_MODE="single"
+
 # Disable flag for re-deploying NATS from scratch.
 export NATS_REDEPLOY=""
 
@@ -167,3 +174,10 @@ export PROM_EXT_PORT_HTTP="9090"
 
 # Set the external port Grafana HTTP Dashboards will be exposed to.
 export GRAF_EXT_PORT_HTTP="3000"
+
+
+# ----- Apache Kafka -----------------------------------------------------------
+
+# Set the namespace where Apache Kafka will be deployed.
+export KFK_NAMESPACE="kafka"
+
diff --git a/proto/acl.proto b/proto/acl.proto
index d777768819c4cc0ca03614b6928d9c2d9511b449..b45d46226d2706396f6d4c0e73ce72e15a75f2d5 100644
--- a/proto/acl.proto
+++ b/proto/acl.proto
@@ -46,6 +46,7 @@ message AclMatch {
   uint32 dst_port         = 6;
   uint32 start_mpls_label = 7;
   uint32 end_mpls_label   = 8;
+  string tcp_flags        = 9;
 }
 
 message AclAction {
diff --git a/proto/analytics_frontend.proto b/proto/analytics_frontend.proto
new file mode 100644
index 0000000000000000000000000000000000000000..096c1ee035ae663359d9f4df1e071d3997a0d351
--- /dev/null
+++ b/proto/analytics_frontend.proto
@@ -0,0 +1,69 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package analytics_frontend;
+
+import "context.proto";
+import "kpi_manager.proto";
+//import "kpi_sample_types.proto";
+
+service AnalyticsFrontendService {
+  rpc StartAnalyzer  (Analyzer      ) returns (AnalyzerId   ) {}
+  rpc StopAnalyzer   (AnalyzerId    ) returns (context.Empty) {}
+  rpc SelectAnalyzers(AnalyzerFilter) returns (AnalyzerList ) {}
+}
+
+message AnalyzerId {
+  context.Uuid analyzer_id = 1;
+}
+
+enum AnalyzerOperationMode {
+  ANALYZEROPERATIONMODE_BATCH     = 0;
+  ANALYZEROPERATIONMODE_STREAMING = 1;
+}
+
+message Analyzer {
+  string                     algorithm_name       = 1; // The algorithm to be executed
+  repeated kpi_manager.KpiId input_kpi_ids        = 2; // The KPI Ids to be processed by the analyzer
+  repeated kpi_manager.KpiId output_kpi_ids       = 3; // The KPI Ids produced by the analyzer
+  AnalyzerOperationMode      operation_mode       = 4; // Operation mode of the analyzer
+
+  // In batch mode...
+  float                      batch_min_duration_s = 5; // ..., min duration to collect before executing batch
+  float                      batch_max_duration_s = 6; // ..., max duration collected to execute the batch
+  uint64                     batch_min_size       = 7; // ..., min number of samples to collect before executing batch
+  uint64                     batch_max_size       = 8; // ..., max number of samples collected to execute the batch
+}
+
+message AnalyzerFilter {
+  // Analyzers that fulfill the filter are those that match ALL the following fields.
+  // An empty list means: any value is accepted.
+  // All fields empty means: list all Analyzers
+  repeated AnalyzerId                     analyzer_id     = 1;
+  repeated string                         algorithm_names = 2;
+  repeated kpi_manager.KpiId              input_kpi_ids   = 3;
+  repeated kpi_manager.KpiId              output_kpi_ids  = 4;
+  //repeated kpi_sample_types.KpiSampleType kpi_sample_type = 5; // Not implemented
+  //repeated context.DeviceId               device_id       = 6; // Not implemented
+  //repeated context.EndPointId             endpoint_id     = 7; // Not implemented
+  //repeated context.ServiceId              service_id      = 8; // Not implemented
+  //repeated context.SliceId                slice_id        = 9; // Not implemented
+  //repeated context.ConnectionId           connection_id   = 10; // Not implemented
+  //repeated context.LinkId                 link_id         = 11; // Not implemented
+}
+
+message AnalyzerList {
+  repeated Analyzer analyzer_list = 1;
+}
diff --git a/proto/device.proto b/proto/device.proto
index 3d7ba14bb75e226c51d8d2462fca76a1cab86554..a1882f33f8e177502c456672a0517928f0259ef5 100644
--- a/proto/device.proto
+++ b/proto/device.proto
@@ -16,7 +16,7 @@ syntax = "proto3";
 package device;
 
 import "context.proto";
-import "monitoring.proto";
+import "monitoring.proto"; // to be migrated to: "kpi_manager.proto"
 
 service DeviceService {
   rpc AddDevice       (context.Device    ) returns (context.DeviceId    ) {}
@@ -27,8 +27,8 @@ service DeviceService {
 }
 
 message MonitoringSettings {
-  monitoring.KpiId kpi_id = 1;
-  monitoring.KpiDescriptor kpi_descriptor = 2;
-  float sampling_duration_s = 3;
-  float sampling_interval_s = 4;
+  monitoring.KpiId         kpi_id              = 1; // to be migrated to: "kpi_manager.KpiId"
+  monitoring.KpiDescriptor kpi_descriptor      = 2; // to be migrated to: "kpi_manager.KpiDescriptor"
+  float                    sampling_duration_s = 3;
+  float                    sampling_interval_s = 4;
 }
diff --git a/proto/kpi_manager.proto b/proto/kpi_manager.proto
new file mode 100644
index 0000000000000000000000000000000000000000..2640b58c60f004e51c8aeacc0ed76963f0436956
--- /dev/null
+++ b/proto/kpi_manager.proto
@@ -0,0 +1,60 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package kpi_manager;
+
+import "context.proto";
+import "kpi_sample_types.proto";
+
+service KpiManagerService {
+  rpc SetKpiDescriptor    (KpiDescriptor      ) returns (KpiId               ) {}
+  rpc DeleteKpiDescriptor (KpiId              ) returns (context.Empty       ) {}
+  rpc GetKpiDescriptor    (KpiId              ) returns (KpiDescriptor       ) {}
+  rpc SelectKpiDescriptor (KpiDescriptorFilter) returns (KpiDescriptorList   ) {}
+}
+
+message KpiId {
+  context.Uuid kpi_id = 1;
+}
+
+message KpiDescriptor {
+  KpiId                          kpi_id          = 1;
+  string                         kpi_description = 2;
+  kpi_sample_types.KpiSampleType kpi_sample_type = 3;
+  context.DeviceId               device_id       = 4;
+  context.EndPointId             endpoint_id     = 5;
+  context.ServiceId              service_id      = 6;
+  context.SliceId                slice_id        = 7;
+  context.ConnectionId           connection_id   = 8;
+  context.LinkId                 link_id         = 9;
+}
+
+message KpiDescriptorFilter {
+  // KPI Descriptors that fulfill the filter are those that match ALL the following fields.
+  // An empty list means: any value is accepted.
+  // All fields empty means: list all KPI Descriptors
+  repeated KpiId                          kpi_id          = 1;
+  repeated kpi_sample_types.KpiSampleType kpi_sample_type = 2;
+  repeated context.DeviceId               device_id       = 3;
+  repeated context.EndPointId             endpoint_id     = 4;
+  repeated context.ServiceId              service_id      = 5;
+  repeated context.SliceId                slice_id        = 6;
+  repeated context.ConnectionId           connection_id   = 7;
+  repeated context.LinkId                 link_id         = 8;
+}
+
+message KpiDescriptorList {
+  repeated KpiDescriptor kpi_descriptor_list = 1;
+}
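For illustration, a hedged grpcurl call against the service defined above (port 30010 per manifests/kpi_managerservice.yaml; the enum value is assumed to exist in kpi_sample_types.proto, and the target address is a placeholder):

```bash
# Register a KPI descriptor; the response carries the generated KpiId.
grpcurl -plaintext \
    -import-path proto -proto kpi_manager.proto \
    -d '{"kpi_description": "packets received on endpoint", "kpi_sample_type": "KPISAMPLETYPE_PACKETS_RECEIVED"}' \
    <kpi-managerservice-ip>:30010 kpi_manager.KpiManagerService/SetKpiDescriptor
```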
diff --git a/proto/kpi_value_api.proto b/proto/kpi_value_api.proto
new file mode 100644
index 0000000000000000000000000000000000000000..dff96272e3d05756dd19a49ecaede7311b196540
--- /dev/null
+++ b/proto/kpi_value_api.proto
@@ -0,0 +1,52 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package kpi_value_api;
+
+import "context.proto";
+import "kpi_manager.proto";
+
+service KpiValueAPIService {
+	rpc StoreKpiValues  (KpiValueList)   returns (context.Empty) {}
+	rpc SelectKpiValues (KpiValueFilter) returns (KpiValueList)  {}
+}
+
+message KpiValue {
+	kpi_manager.KpiId kpi_id         = 1;
+	context.Timestamp timestamp      = 2;
+	KpiValueType      kpi_value_type = 3;
+}
+
+message KpiValueList {
+	repeated KpiValue kpi_value_list = 1;
+}
+
+message KpiValueType {
+  oneof value {
+    int32  int32Val  = 1;
+    uint32 uint32Val = 2;
+    int64  int64Val  = 3;
+    uint64 uint64Val = 4;
+    float  floatVal  = 5;
+    string stringVal = 6;
+    bool   boolVal   = 7;
+  }
+}
+
+message KpiValueFilter {
+	repeated kpi_manager.KpiId kpi_id          = 1;
+	repeated context.Timestamp start_timestamp = 2;
+	repeated context.Timestamp end_timestamp   = 3;
+}
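Similarly, a hedged grpcurl sketch for storing one value (port 30020 per manifests/kpi_value_apiservice.yaml; the context.Uuid and context.Timestamp field names are assumed from context.proto, and the UUID/address are placeholders):

```bash
# Store a single int32 sample for an existing KPI.
grpcurl -plaintext \
    -import-path proto -proto kpi_value_api.proto \
    -d '{"kpi_value_list": [{
          "kpi_id": {"kpi_id": {"uuid": "123e4567-e89b-12d3-a456-426614174000"}},
          "timestamp": {"timestamp": 1719000000.0},
          "kpi_value_type": {"int32Val": 25}
        }]}' \
    <kpi-value-apiservice-ip>:30020 kpi_value_api.KpiValueAPIService/StoreKpiValues
```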
diff --git a/proto/monitoring.proto b/proto/monitoring.proto
old mode 100644
new mode 100755
index 2c1c2f8ad58192586c17e310e33bccebbe775ee8..083bd82854547478d3a8f4a8935fdf75e9070d9d
--- a/proto/monitoring.proto
+++ b/proto/monitoring.proto
@@ -145,12 +145,12 @@ message SubsList {
 }
 
 message AlarmDescriptor {
-  AlarmID                     alarm_id              = 1;
-  string                      alarm_description     = 2;
-  string                      name                  = 3;
-  KpiId                       kpi_id                = 4;
-  KpiValueRange               kpi_value_range       = 5;
-  context.Timestamp           timestamp             = 6;
+  AlarmID           alarm_id          = 1;
+  string            alarm_description = 2;
+  string            name              = 3;
+  KpiId             kpi_id            = 4;
+  KpiValueRange     kpi_value_range   = 5;
+  context.Timestamp timestamp         = 6;
 }
 
 message AlarmID{
@@ -170,5 +170,5 @@ message AlarmResponse {
 }
 
 message AlarmList {
-    repeated AlarmDescriptor alarm_descriptor = 1;
+  repeated AlarmDescriptor alarm_descriptor = 1;
 }
diff --git a/proto/optical_attack_detector.proto b/proto/optical_attack_detector.proto
index 783e23b35d754db983c75c56dadc203996beadd4..f74eea68b8c5a588f5ecc06a59916058cb8d9695 100644
--- a/proto/optical_attack_detector.proto
+++ b/proto/optical_attack_detector.proto
@@ -12,12 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// protocol buffers documentation: https://developers.google.com/protocol-buffers/docs/proto3
 syntax = "proto3";
 package optical_attack_detector;
 
 import "context.proto";
-import "monitoring.proto";
+import "monitoring.proto"; // to be migrated to: "kpi_manager.proto"
 
 service OpticalAttackDetectorService {
   
@@ -28,5 +27,5 @@ service OpticalAttackDetectorService {
 
 message DetectionRequest {
   context.ServiceId service_id = 1;
-  monitoring.KpiId  kpi_id     = 2;
+  monitoring.KpiId  kpi_id     = 2; // to be migrated to: "kpi_manager.KpiId"
 }
diff --git a/proto/policy_condition.proto b/proto/policy_condition.proto
index add3ec1ab127674e171c366ffa49346892b3ff0d..612dcb1af8eb8adb0db65b8ae47301c87ad6b9ef 100644
--- a/proto/policy_condition.proto
+++ b/proto/policy_condition.proto
@@ -15,13 +15,13 @@
 syntax = "proto3";
 package policy;
 
-import "monitoring.proto";
+import "monitoring.proto"; // to be migrated to: "kpi_manager.proto"
 
 // Condition
 message PolicyRuleCondition {
-  monitoring.KpiId kpiId = 1;
-  NumericalOperator numericalOperator = 2;
-  monitoring.KpiValue kpiValue = 3;
+  monitoring.KpiId    kpiId             = 1;  // to be migrated to: "kpi_manager.KpiId"
+  NumericalOperator   numericalOperator = 2;
+  monitoring.KpiValue kpiValue          = 3;
 }
 
 // Operator to be used when comparing Kpis with condition values
diff --git a/proto/telemetry_frontend.proto b/proto/telemetry_frontend.proto
new file mode 100644
index 0000000000000000000000000000000000000000..dbc1e8bf688f9f2df341484c1929e2338c458bbf
--- /dev/null
+++ b/proto/telemetry_frontend.proto
@@ -0,0 +1,48 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package telemetry_frontend;
+
+import "context.proto";
+import "kpi_manager.proto";
+
+service TelemetryFrontendService {
+  rpc StartCollector  (Collector      ) returns (CollectorId  ) {}
+  rpc StopCollector   (CollectorId    ) returns (context.Empty) {}
+  rpc SelectCollectors(CollectorFilter) returns (CollectorList) {}
+}
+
+message CollectorId {
+  context.Uuid collector_id = 1;
+}
+
+message Collector {
+  CollectorId       collector_id = 1; // The Collector ID
+  kpi_manager.KpiId kpi_id       = 2; // The KPI Id to be associated to the collected samples
+  float             duration_s   = 3; // Terminate data collection after duration_s seconds; duration_s == 0 means collect indefinitely
+  float             interval_s   = 4; // Interval between collected samples
+}
+
+message CollectorFilter {
+  // Collectors that fulfill the filter are those that match ALL the following fields.
+  // An empty list means: any value is accepted.
+  // All fields empty means: list all Collectors
+  repeated CollectorId       collector_id = 1;
+  repeated kpi_manager.KpiId kpi_id       = 2;
+}
+
+message CollectorList {
+  repeated Collector collector_list = 1;
+}
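
Editor's note: a matching client sketch for the new TelemetryFrontendService (again, stub module names are hypothetical and the KpiId layout is assumed); the port 30050 comes from the Constants.py change below:

    import uuid
    import grpc
    import telemetry_frontend_pb2, telemetry_frontend_pb2_grpc  # hypothetical stub modules generated from this proto

    channel = grpc.insecure_channel('telemetry-frontendservice:30050')  # port per DEFAULT_SERVICE_GRPC_PORTS below
    stub = telemetry_frontend_pb2_grpc.TelemetryFrontendServiceStub(channel)

    collector = telemetry_frontend_pb2.Collector()          # collector_id left empty; presumably assigned server-side
    collector.kpi_id.kpi_id.uuid = str(uuid.uuid4())        # KPI to associate to the collected samples
    collector.duration_s = 0.0                              # 0 means collect indefinitely
    collector.interval_s = 30.0                             # one sample every 30 seconds

    collector_id = stub.StartCollector(collector)           # returns a CollectorId
    stub.StopCollector(collector_id)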
diff --git a/scripts/run_tests_locally-device-openconfig-ocnos.sh b/scripts/run_tests_locally-device-openconfig-ocnos.sh
new file mode 100755
index 0000000000000000000000000000000000000000..60af6768d37199c957d17c6804c8af1072d0b0e1
--- /dev/null
+++ b/scripts/run_tests_locally-device-openconfig-ocnos.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unitary tests and analyze code coverage at the same time
+# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO -o log_cli=true --verbose \
+    device/tests/test_unitary_openconfig_ocnos.py
diff --git a/scripts/run_tests_locally-kpi-DB.sh b/scripts/run_tests_locally-kpi-DB.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4953b49e0a437becfda1648c722bcdcf92c58d93
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-DB.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+# python3 kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    kpi_manager/tests/test_kpi_db.py
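
Editor's note: this script (and the kpi-manager and kpi-prom-writer variants below) derives CRDB_URI from a kubectl lookup, so CRDB_NAMESPACE must already be set in the caller's environment. A hedged invocation sketch; the "crdb" default is an assumption mirroring show_logs_telemetry-DB.sh further below:

    export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
    ./scripts/run_tests_locally-kpi-DB.sh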
diff --git a/scripts/run_tests_locally-kpi-manager.sh b/scripts/run_tests_locally-kpi-manager.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a6a24f90db93d56300ac997bd00675c479ef13ae
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-manager.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+# python3 kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    kpi_manager/tests/test_kpi_manager.py
diff --git a/scripts/run_tests_locally-kpi-prom-writer.sh b/scripts/run_tests_locally-kpi-prom-writer.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8865a8a34495a032525c7585a409f4c32c7249df
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-prom-writer.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+CRDB_SQL_ADDRESS=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.clusterIP}')
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    kpi_value_writer/tests/test_metric_writer_to_prom.py
diff --git a/scripts/run_tests_locally-kpi-value-API.sh b/scripts/run_tests_locally-kpi-value-API.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8dfbfb16237634519dcae2fcc34f850a5188c1e7
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-value-API.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG -o log_cli=true --verbose \
+    kpi_value_api/tests/test_kpi_value_api.py
diff --git a/scripts/run_tests_locally-kpi-value-writer.sh b/scripts/run_tests_locally-kpi-value-writer.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8faaeb6d895a240278d7ceb0c5c0b2855fa25910
--- /dev/null
+++ b/scripts/run_tests_locally-kpi-value-writer.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
+    kpi_value_writer/tests/test_kpi_value_writer.py
diff --git a/scripts/run_tests_locally-telemetry-DB.sh b/scripts/run_tests_locally-telemetry-DB.sh
new file mode 100755
index 0000000000000000000000000000000000000000..bb1c48b76440c00b398875a8f704c2a82ba4ab50
--- /dev/null
+++ b/scripts/run_tests_locally-telemetry-DB.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-cli-level=INFO --verbose \
+    telemetry/database/tests/telemetryDBtests.py
diff --git a/scripts/run_tests_locally-telemetry-backend.sh b/scripts/run_tests_locally-telemetry-backend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9cf404ffcef6c99b261f81eb0c6b910dd60845e5
--- /dev/null
+++ b/scripts/run_tests_locally-telemetry-backend.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+# python3 kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \
+    telemetry/backend/tests/testTelemetryBackend.py
diff --git a/scripts/run_tests_locally-telemetry-frontend.sh b/scripts/run_tests_locally-telemetry-frontend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7652ccb583268285dcd2fcf3090b717dc18e4fc3
--- /dev/null
+++ b/scripts/run_tests_locally-telemetry-frontend.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+# python3 kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-level=INFO --log-cli-level=INFO --verbose \
+    telemetry/frontend/tests/test_frontend.py
diff --git a/scripts/run_tests_locally-telemetry-mgtDB.sh b/scripts/run_tests_locally-telemetry-mgtDB.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8b68104eaf343b57ec4953334cda37167cca3529
--- /dev/null
+++ b/scripts/run_tests_locally-telemetry-mgtDB.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+# RCFILE=$PROJECTDIR/coverage/.coveragerc
+# coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+#     kpi_manager/tests/test_unitary.py
+
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+python3 -m pytest --log-cli-level=INFO --verbose \
+    telemetry/database/tests/managementDBtests.py
diff --git a/scripts/show_logs_kpi_manager.sh b/scripts/show_logs_kpi_manager.sh
new file mode 100755
index 0000000000000000000000000000000000000000..86f084f69f6babf5a90957f432b214e35a08c461
--- /dev/null
+++ b/scripts/show_logs_kpi_manager.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/kpi-managerservice -c server
diff --git a/scripts/show_logs_kpi_value_api.sh b/scripts/show_logs_kpi_value_api.sh
new file mode 100755
index 0000000000000000000000000000000000000000..041ad7f1ffb1a218af00d5d142024a5063d109c3
--- /dev/null
+++ b/scripts/show_logs_kpi_value_api.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/kpi-value-apiservice -c server
diff --git a/scripts/show_logs_kpi_value_writer.sh b/scripts/show_logs_kpi_value_writer.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d62f3ea0a1a6961be4a5b6f4841c9ba4e1a89316
--- /dev/null
+++ b/scripts/show_logs_kpi_value_writer.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/kpi-value-writerservice -c server
diff --git a/scripts/show_logs_telemetry-DB.sh b/scripts/show_logs_telemetry-DB.sh
new file mode 100755
index 0000000000000000000000000000000000000000..84fc875d01e18eae9b144edaf220d5cb74017ea4
--- /dev/null
+++ b/scripts/show_logs_telemetry-DB.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace where CockroachDB is deployed.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"crdb"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+kubectl --namespace $TFS_K8S_NAMESPACE logs cockroachdb-0
diff --git a/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md b/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md
index 88f7a7bd5c7a268857a7a4ec2642c388daf715d3..f4faae268f75f96223b4c74571de695fada11497 100644
--- a/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md
+++ b/src/bgpls_speaker/service/java/netphony-topology/doc/Examples.md
@@ -1,4 +1,4 @@
-<!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+<!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md b/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md
index 9b0c48c8ed24fe8ca5c06f118b3d440653c686e5..c7e975e864b042a1a4190f6090d5ed2ccee8ebf0 100644
--- a/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md
+++ b/src/bgpls_speaker/service/java/netphony-topology/doc/TAPIExample.md
@@ -1,4 +1,4 @@
-<!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+<!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md b/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md
index 452050b65106b8393ac8a7df98ea472b7705e608..ac9143d153d48d713210662249ffc15b833b4c83 100644
--- a/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md
+++ b/src/bgpls_speaker/service/java/netphony-topology/doc/TopologyFileDescription.md
@@ -1,4 +1,4 @@
-<!-- Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+<!-- Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/src/common/Constants.py b/src/common/Constants.py
index de9ac45a4089a7847c37ceeeeab000f51566a3a3..767b21343f89e35c2338b522bcdc71c56aca1815 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -58,9 +58,13 @@ class ServiceNameEnum(Enum):
     CACHING                = 'caching'
     TE                     = 'te'
     FORECASTER             = 'forecaster'
-    E2EORCHESTRATOR        = 'e2eorchestrator'
+    E2EORCHESTRATOR        = 'e2e-orchestrator'
     OPTICALCONTROLLER      = 'opticalcontroller'
     BGPLS                  = 'bgpls-speaker'
+    KPIMANAGER             = 'kpi-manager'
+    KPIVALUEAPI            = 'kpi-value-api'
+    KPIVALUEWRITER         = 'kpi-value-writer'
+    TELEMETRYFRONTEND      = 'telemetry-frontend'
 
     # Used for test and debugging only
     DLT_GATEWAY    = 'dltgateway'
@@ -90,6 +94,10 @@ DEFAULT_SERVICE_GRPC_PORTS = {
     ServiceNameEnum.E2EORCHESTRATOR        .value : 10050,
     ServiceNameEnum.OPTICALCONTROLLER      .value : 10060,
     ServiceNameEnum.BGPLS                  .value : 20030,
+    ServiceNameEnum.KPIMANAGER             .value : 30010,
+    ServiceNameEnum.KPIVALUEAPI            .value : 30020,
+    ServiceNameEnum.KPIVALUEWRITER         .value : 30030,
+    ServiceNameEnum.TELEMETRYFRONTEND      .value : 30050,
 
     # Used for test and debugging only
     ServiceNameEnum.DLT_GATEWAY   .value : 50051,
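
Editor's note: the new entries let clients resolve the monitoring components' endpoints generically. A minimal sketch; the in-cluster service name is assumed from the show_logs_kpi_manager.sh script above:

    from common.Constants import ServiceNameEnum, DEFAULT_SERVICE_GRPC_PORTS

    # e.g., build the in-cluster endpoint of the new KPI Manager
    port = DEFAULT_SERVICE_GRPC_PORTS[ServiceNameEnum.KPIMANAGER.value]  # 30010
    endpoint = 'kpi-managerservice:{:d}'.format(port)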
diff --git a/src/common/Settings.py b/src/common/Settings.py
index edc74c776d7818468c0162d26b03698aa3ef25ef..eaeb363adc1d9eadb9ddb0487abef8a0885ce380 100644
--- a/src/common/Settings.py
+++ b/src/common/Settings.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging, os, time
+import logging, os, re, time
 from typing import Dict, List
 from common.Constants import (
     DEFAULT_GRPC_BIND_ADDRESS, DEFAULT_GRPC_GRACE_PERIOD, DEFAULT_GRPC_MAX_WORKERS, DEFAULT_HTTP_BIND_ADDRESS,
@@ -68,7 +68,8 @@ def get_setting(name, **kwargs):
     raise Exception('Setting({:s}) not specified in environment or configuration'.format(str(name)))
 
 def get_env_var_name(service_name : ServiceNameEnum, env_var_group):
-    return ('{:s}SERVICE_{:s}'.format(service_name.value, env_var_group)).upper()
+    service_name = re.sub(r'[^a-zA-Z0-9]', '_', service_name.value)
+    return ('{:s}SERVICE_{:s}'.format(service_name, env_var_group)).upper()
 
 def get_service_host(service_name : ServiceNameEnum):
     envvar_name = get_env_var_name(service_name, ENVVAR_SUFIX_SERVICE_HOST)
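
Editor's note: the re.sub above appears needed because environment variable names cannot contain dashes (Kubernetes likewise maps them to underscores in injected <SERVICE>_SERVICE_HOST variables), so the renamed 'e2e-orchestrator' value must be normalized. A worked sketch, assuming ENVVAR_SUFIX_SERVICE_HOST expands to 'SERVICE_HOST':

    from common.Constants import ServiceNameEnum
    from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, get_env_var_name

    # 'e2e-orchestrator' -> 'E2E_ORCHESTRATORSERVICE_SERVICE_HOST'
    print(get_env_var_name(ServiceNameEnum.E2EORCHESTRATOR, ENVVAR_SUFIX_SERVICE_HOST))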
diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py
new file mode 100644
index 0000000000000000000000000000000000000000..24ae2cff7b5e710e18999eb09029216a4a5d6c8a
--- /dev/null
+++ b/src/common/tools/kafka/Variables.py
@@ -0,0 +1,74 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from enum import Enum
+from confluent_kafka import KafkaException
+from confluent_kafka.admin import AdminClient, NewTopic
+
+
+LOGGER = logging.getLogger(__name__)
+
+class KafkaConfig(Enum):
+    # SERVER_IP    = "127.0.0.1:9092"
+    SERVER_IP    = "kafka-service.kafka.svc.cluster.local:9092"
+    ADMIN_CLIENT =  AdminClient({'bootstrap.servers': SERVER_IP})
+
+class KafkaTopic(Enum):
+    REQUEST  = 'topic_request' 
+    RESPONSE = 'topic_response'
+    RAW      = 'topic_raw' 
+    LABELED  = 'topic_labeled'
+    VALUE    = 'topic_value'
+
+    @staticmethod
+    def create_all_topics() -> bool:
+        """
+            Method to create Kafka topics defined as class members
+        """
+        all_topics = [member.value for member in KafkaTopic]
+        if KafkaTopic.create_new_topic_if_not_exists(all_topics):
+            LOGGER.debug("All topics are created sucsessfully")
+            return True
+        else:
+            LOGGER.debug("Error creating all topics")
+            return False
+    
+    @staticmethod
+    def create_new_topic_if_not_exists(new_topics: list) -> bool:
+        """
+        Create the given Kafka topics if they do not already exist.
+        Args:
+            new_topics (list): topic name(s) to be created on Kafka
+        """
+        LOGGER.debug("Topics names to be verified and created: {:}".format(new_topics))
+        for topic in new_topics:
+            try:
+                topic_metadata = KafkaConfig.ADMIN_CLIENT.value.list_topics(timeout=5)
+                # LOGGER.debug("Existing topic list: {:}".format(topic_metadata.topics))
+                if topic not in topic_metadata.topics:
+                    # If the topic does not exist, create a new topic
+                    print("Topic {:} does not exist. Creating...".format(topic))
+                    LOGGER.debug("Topic {:} does not exist. Creating...".format(topic))
+                    new_topic = NewTopic(topic, num_partitions=1, replication_factor=1)
+                    KafkaConfig.ADMIN_CLIENT.value.create_topics([new_topic])
+                else:
+                    print("Topic name already exists: {:}".format(topic))
+                    LOGGER.debug("Topic name already exists: {:}".format(topic))
+            except Exception as e:
+                LOGGER.debug("Failed to create topic: {:}".format(e))
+                return False
+        return True
+
+# create all topics after the deployments (Telemetry and Analytics)
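
Editor's note: a minimal usage sketch for the helper above, assuming a reachable broker at KafkaConfig.SERVER_IP and the confluent-kafka package installed:

    from common.tools.kafka.Variables import KafkaTopic

    if KafkaTopic.create_all_topics():
        print('Kafka topics ready:', [topic.value for topic in KafkaTopic])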
diff --git a/src/device/requirements.in b/src/device/requirements.in
index 73ea741d16dcdafd7a9be87ad79b457ccb6c5d5e..bf5e6a2b3128f438a7c044c3f3cf9ee393de2265 100644
--- a/src/device/requirements.in
+++ b/src/device/requirements.in
@@ -23,7 +23,8 @@ Flask==2.1.3
 Flask-HTTPAuth==4.5.0
 Flask-RESTful==0.3.9
 Jinja2==3.0.3
-ncclient==0.6.13
+numpy<2.0.0
+ncclient==0.6.15
 p4runtime==1.3.0
 pandas==1.5.*
 paramiko==2.9.2
diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py
index a592b51576acc21e6dc055fe9f41e720f28aae1c..fd36e2dc40e38a125f1812f00eeb304106a40c8a 100644
--- a/src/device/service/drivers/openconfig/OpenConfigDriver.py
+++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import time
 import json
 import anytree, copy, logging, pytz, queue, re, threading
 #import lxml.etree as ET
@@ -237,6 +238,8 @@ def edit_config(
                         test_option=test_option, error_option=error_option, format=format)
                     if commit_per_rule:
                         netconf_handler.commit()                                                                               # configuration commit
+                    if 'table_connections' in resource_key:
+                        time.sleep(5) # CPU usage may spike after route redistribution; give the BGP daemon time to reload
                 
                 #results[i] = True
                 results.append(True)
diff --git a/src/device/service/drivers/openconfig/templates/Acl.py b/src/device/service/drivers/openconfig/templates/Acl.py
index cc3da6434fba0442fc11a33b4d8e380ad4e50bd8..e0e778f11415a2dcccd18f9b61166a68a7cf4fc2 100644
--- a/src/device/service/drivers/openconfig/templates/Acl.py
+++ b/src/device/service/drivers/openconfig/templates/Acl.py
@@ -20,7 +20,7 @@ from .Tools import add_value_from_tag
 LOGGER = logging.getLogger(__name__)
 
 XPATH_ACL_SET     = "//ocacl:acl/ocacl:acl-sets/ocacl:acl-set"
-XPATH_A_ACL_ENTRY = ".//ocacl:acl-entries/ocacl:ecl-entry"
+XPATH_A_ACL_ENTRY = ".//ocacl:acl-entries/ocacl:acl-entry"
 XPATH_A_IPv4      = ".//ocacl:ipv4/ocacl:config"
 XPATH_A_TRANSPORT = ".//ocacl:transport/ocacl:config"
 XPATH_A_ACTIONS   = ".//ocacl:actions/ocacl:config"
@@ -34,29 +34,31 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
 
     response = []
     acl = {}
+    name = {}
 
     for xml_acl in xml_data.xpath(XPATH_ACL_SET, namespaces=NAMESPACES):
         #LOGGER.info('xml_acl = {:s}'.format(str(ET.tostring(xml_acl))))
 
         acl_name = xml_acl.find('ocacl:name', namespaces=NAMESPACES)
         if acl_name is None or acl_name.text is None: continue
-        add_value_from_tag(acl, 'name', acl_name)
+        add_value_from_tag(name, 'name', acl_name)
 
         acl_type = xml_acl.find('ocacl:type', namespaces=NAMESPACES)
         add_value_from_tag(acl, 'type', acl_type)
 
         for xml_acl_entries in xml_acl.xpath(XPATH_A_ACL_ENTRY, namespaces=NAMESPACES):
 
-            acl_id = xml_acl_entries.find('ocacl:sequence_id', namespaces=NAMESPACES)
-            add_value_from_tag(acl, 'sequence_id', acl_id)
+            acl_id = xml_acl_entries.find('ocacl:sequence-id', namespaces=NAMESPACES)
+            add_value_from_tag(acl, 'sequence-id', acl_id)
+            LOGGER.info('xml_acl_id = {:s}'.format(str(ET.tostring(acl_id))))
 
             for xml_ipv4 in xml_acl_entries.xpath(XPATH_A_IPv4, namespaces=NAMESPACES):
 
-                ipv4_source = xml_ipv4.find('ocacl:source_address', namespaces=NAMESPACES)
-                add_value_from_tag(acl, 'source_address' , ipv4_source)
+                ipv4_source = xml_ipv4.find('ocacl:source-address', namespaces=NAMESPACES)
+                add_value_from_tag(acl, 'source-address' , ipv4_source)
 
-                ipv4_destination = xml_ipv4.find('ocacl:destination_address', namespaces=NAMESPACES)
-                add_value_from_tag(acl, 'destination_address' , ipv4_destination)
+                ipv4_destination = xml_ipv4.find('ocacl:destination-address', namespaces=NAMESPACES)
+                add_value_from_tag(acl, 'destination-address' , ipv4_destination)
 
                 ipv4_protocol = xml_ipv4.find('ocacl:protocol', namespaces=NAMESPACES)
                 add_value_from_tag(acl, 'protocol' , ipv4_protocol)
@@ -64,30 +66,30 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
                 ipv4_dscp = xml_ipv4.find('ocacl:dscp', namespaces=NAMESPACES)
                 add_value_from_tag(acl, 'dscp' , ipv4_dscp)
 
-                ipv4_hop_limit = xml_ipv4.find('ocacl:hop_limit', namespaces=NAMESPACES)
-                add_value_from_tag(acl, 'hop_limit' , ipv4_hop_limit)
+                ipv4_hop_limit = xml_ipv4.find('ocacl:hop-limit', namespaces=NAMESPACES)
+                add_value_from_tag(acl, 'hop-limit' , ipv4_hop_limit)
 
             for xml_transport in xml_acl_entries.xpath(XPATH_A_TRANSPORT, namespaces=NAMESPACES):
 
-                transport_source = xml_transport.find('ocacl:source_port', namespaces=NAMESPACES)
-                add_value_from_tag(acl, 'source_port' ,transport_source)
+                transport_source = xml_transport.find('ocacl:source-port', namespaces=NAMESPACES)
+                add_value_from_tag(acl, 'source-port' ,transport_source)
 
-                transport_destination = xml_transport.find('ocacl:destination_port', namespaces=NAMESPACES)
-                add_value_from_tag(acl, 'destination_port' ,transport_destination)
+                transport_destination = xml_transport.find('ocacl:destination-port', namespaces=NAMESPACES)
+                add_value_from_tag(acl, 'destination-port' ,transport_destination)
 
-                transport_tcp_flags = xml_transport.find('ocacl:tcp_flags', namespaces=NAMESPACES)
-                add_value_from_tag(acl, 'tcp_flags' ,transport_tcp_flags)
+                transport_tcp_flags = xml_transport.find('ocacl:tcp-flags', namespaces=NAMESPACES)
+                add_value_from_tag(acl, 'tcp-flags' ,transport_tcp_flags)
 
             for xml_action in xml_acl_entries.xpath(XPATH_A_ACTIONS, namespaces=NAMESPACES):
 
-                action = xml_action.find('ocacl:forwarding_action', namespaces=NAMESPACES)
-                add_value_from_tag(acl, 'forwarding_action' ,action)
+                action = xml_action.find('ocacl:forwarding-action', namespaces=NAMESPACES)
+                add_value_from_tag(acl, 'forwarding-action' ,action)
 
-                log_action = xml_action.find('ocacl:log_action', namespaces=NAMESPACES)
-                add_value_from_tag(acl, 'log_action' ,log_action)
+                log_action = xml_action.find('ocacl:log-action', namespaces=NAMESPACES)
+                add_value_from_tag(acl, 'log-action' ,log_action)
 
             resource_key =  '/acl/acl-set[{:s}][{:s}]/acl-entry[{:s}]'.format(
-                acl['name'], acl['type'], acl['sequence-id'])
+                name['name'], acl['type'], acl['sequence-id'])
             response.append((resource_key,acl))
 
     for xml_interface in xml_data.xpath(XPATH_INTERFACE, namespaces=NAMESPACES):
@@ -99,25 +101,25 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
 
         for xml_ingress in xml_interface.xpath(XPATH_I_INGRESS, namespaces=NAMESPACES):
 
-            i_name = xml_ingress.find('ocacl:set_name_ingress', namespaces=NAMESPACES)
-            add_value_from_tag(interface, 'ingress_set_name' , i_name)
+            i_name = xml_ingress.find('ocacl:set-name-ingress', namespaces=NAMESPACES)
+            add_value_from_tag(interface, 'ingress-set-name' , i_name)
 
-            i_type = xml_ingress.find('ocacl:type_ingress', namespaces=NAMESPACES)
-            add_value_from_tag(interface, 'ingress_type' , i_type)
+            i_type = xml_ingress.find('ocacl:type-ingress', namespaces=NAMESPACES)
+            add_value_from_tag(interface, 'ingress-type' , i_type)
 
             resource_key =  '/acl/interfaces/ingress[{:s}][{:s}]'.format(
-                acl['name'], acl['type'])
+                name['name'], acl['type'])
             response.append((resource_key,interface))
 
         for xml_egress in xml_interface.xpath(XPATH_I_EGRESS, namespaces=NAMESPACES):
 
-            e_name = xml_egress.find('ocacl:set_name_egress', namespaces=NAMESPACES)
-            add_value_from_tag(interface, 'egress_set_name' , e_name)
+            e_name = xml_egress.find('ocacl:set-name-egress', namespaces=NAMESPACES)
+            add_value_from_tag(interface, 'egress-set-name' , e_name)
 
-            e_type = xml_egress.find('ocacl:type_egress', namespaces=NAMESPACES)
-            add_value_from_tag(interface, 'egress_type' , e_type)
+            e_type = xml_egress.find('ocacl:type-egress', namespaces=NAMESPACES)
+            add_value_from_tag(interface, 'egress-type' , e_type)
 
             resource_key =  '/acl/interfaces/egress[{:s}][{:s}]'.format(
-                acl['name'], acl['type'])
+                name['name'], acl['type'])
             response.append((resource_key,interface))
     return response
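
Editor's note: the rewrite above fixes the 'ecl-entry' XPath typo, switches the underscored tag names to the hyphenated ones OpenConfig actually uses, and stores the ACL name in a separate `name` dict so that building resource_key no longer fails on the missing 'sequence-id' key. A sketch of the resulting entries, with illustrative values:

    # one (resource_key, fields) tuple per parsed ACL entry, e.g.:
    # ('/acl/acl-set[MyACL][ACL_IPV4]/acl-entry[10]',
    #  {'type': 'ACL_IPV4', 'sequence-id': '10', 'source-address': '10.0.0.0/24',
    #   'destination-address': '10.0.1.0/24', 'forwarding-action': 'ACCEPT'})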
diff --git a/src/device/service/drivers/openconfig/templates/Inventory.py b/src/device/service/drivers/openconfig/templates/Inventory.py
index 9897f04f9df2dd6c1ce4010d9ad9878ae0d04242..4fca35bc39ea4a72a864f16832f944ed7ad21621 100644
--- a/src/device/service/drivers/openconfig/templates/Inventory.py
+++ b/src/device/service/drivers/openconfig/templates/Inventory.py
@@ -75,6 +75,10 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
         component_location = xml_component.find('ocp:state/ocp:location', namespaces=NAMESPACES)
         if not component_location is None:
             add_value_from_tag(inventory['attributes'], 'location', component_location)
+
+        component_id = xml_component.find('ocp:state/ocp:id', namespaces=NAMESPACES)
+        if not component_id is None:
+            add_value_from_tag(inventory['attributes'], 'id', component_id)
         
         component_type = xml_component.find('ocp:state/ocp:type', namespaces=NAMESPACES)
         if component_type is not None:
@@ -109,7 +113,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
 
         component_mfg_name = xml_component.find('ocp:state/ocp:mfg-name', namespaces=NAMESPACES)
         if not component_mfg_name is None:
-            add_value_from_tag(inventory['attributes'], 'manufacturer-name', component_mfg_name)
+            add_value_from_tag(inventory['attributes'], 'mfg-name', component_mfg_name)
         
         component_removable = xml_component.find('ocp:state/ocp:removable', namespaces=NAMESPACES)
         if not component_removable is None:
diff --git a/src/device/service/drivers/openconfig/templates/NetworkInstances.py b/src/device/service/drivers/openconfig/templates/NetworkInstances.py
index 7bed281812c4097124f4794a7d6232993b125957..97b55c817fdf002fe5f09852b203e5b14c600b06 100644
--- a/src/device/service/drivers/openconfig/templates/NetworkInstances.py
+++ b/src/device/service/drivers/openconfig/templates/NetworkInstances.py
@@ -23,6 +23,8 @@ XPATH_NETWORK_INSTANCES = "//ocni:network-instances/ocni:network-instance"
 XPATH_NI_PROTOCOLS      = ".//ocni:protocols/ocni:protocol"
 XPATH_NI_TABLE_CONNECTS = ".//ocni:table-connections/ocni:table-connection"
 
+XPATH_NI_INTERFACE      = ".//ocni:interfaces/ocni:interface"
+
 XPATH_NI_IIP_AP         = ".//ocni:inter-instance-policies/ocni:apply-policy"
 XPATH_NI_IIP_AP_IMPORT  = ".//ocni:config/ocni:import-policy"
 XPATH_NI_IIP_AP_EXPORT  = ".//ocni:config/ocni:export-policy"
@@ -136,6 +138,21 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
                 table_connection['address_family'])
             response.append((resource_key, table_connection))
 
+        for xml_interface in xml_network_instance.xpath(XPATH_NI_INTERFACE, namespaces=NAMESPACES):
+            LOGGER.info('xml_interfaces = {:s}'.format(str(ET.tostring(xml_interface))))
+
+            interface = {}
+            name_iface = xml_interface.find('ocni:config/ocni:interface', namespaces=NAMESPACES)
+            if name_iface is None or name_iface.text is None: continue
+            add_value_from_tag(interface, 'name_iface', name_iface)
+            
+            name_subiface = xml_interface.find('ocni:config/ocni:subinterface', namespaces=NAMESPACES)
+            add_value_from_tag(interface, 'name_subiface', name_subiface)
+            
+            resource_key = '/network_instance[{:s}]/interface[{:s}]'.format(
+                network_instance['name'], interface['name_iface'])
+            response.append((resource_key, interface))
+
         for xml_iip_ap in xml_network_instance.xpath(XPATH_NI_IIP_AP, namespaces=NAMESPACES):
             #LOGGER.info('xml_iip_ap = {:s}'.format(str(ET.tostring(xml_iip_ap))))
 
diff --git a/src/device/service/drivers/openconfig/templates/Tools.py b/src/device/service/drivers/openconfig/templates/Tools.py
index b7a5ba9c1a962032fe13b4ec5cb70eae7ff604a1..c4ef22b1e3c11f1e512026bea8e2122ab703a9e5 100644
--- a/src/device/service/drivers/openconfig/templates/Tools.py
+++ b/src/device/service/drivers/openconfig/templates/Tools.py
@@ -61,7 +61,8 @@ def generate_templates(resource_key: str, resource_value: str, delete: bool,vend
         elif "inter_instance_policies" in resource_key:
             result_templates.append(associate_RP_to_NI(data))
         elif "protocols" in resource_key:
-            if vendor == "ADVA": result_templates.append(add_protocol_NI(data, vendor, delete))
+            if vendor is None or vendor == "ADVA":
+                result_templates.append(add_protocol_NI(data, vendor, delete))
         elif "table_connections" in resource_key:
             result_templates.append(create_table_conns(data, delete))
         elif "interface" in resource_key:
diff --git a/src/device/service/drivers/openconfig/templates/VPN/Interfaces_multivendor.py b/src/device/service/drivers/openconfig/templates/VPN/Interfaces_multivendor.py
index 09e3b618a69c738b57b4d2268c0429e6f8119147..ab57ce3bd26e9183f931a1a6e13a44a9a85bef7d 100644
--- a/src/device/service/drivers/openconfig/templates/VPN/Interfaces_multivendor.py
+++ b/src/device/service/drivers/openconfig/templates/VPN/Interfaces_multivendor.py
@@ -54,7 +54,7 @@ def create_If_SubIf(data,vendor, DEL):
                     with tag('enabled'):text('true')    
                 with tag('subinterfaces'):
                     with tag('subinterface'):
-                        if vendor == 'ADVA':
+                        if vendor is None or vendor == 'ADVA':
                             with tag('index'): text('0')
                         with tag('config'):
                             with tag('index'): text('0')
@@ -65,8 +65,10 @@ def create_If_SubIf(data,vendor, DEL):
                                 with tag('single-tagged'):
                                     with tag('config'):
                                         with tag('vlan-id'):text(data['vlan_id'])
-                        if "l3ipvlan" in data['type']: 
+                        if "l3ipvlan" in data['type'] and 'address_ip' in data: 
                             with tag('ipv4',  xmlns="http://openconfig.net/yang/interfaces/ip"):
+                                if 'mtu' in data:
+                                    with tag('mtu'):text(data['mtu'])
                                 with tag('addresses'):
                                     with tag('address'):
                                         with tag('ip'):text(data['address_ip'])
diff --git a/src/device/service/drivers/openconfig/templates/VPN/Network_instance_multivendor.py b/src/device/service/drivers/openconfig/templates/VPN/Network_instance_multivendor.py
index c2d18ef172bccaf46b4e323a1fef6ef048232888..157dd0ab89a0eb625d428dd95109faabc399bcf0 100644
--- a/src/device/service/drivers/openconfig/templates/VPN/Network_instance_multivendor.py
+++ b/src/device/service/drivers/openconfig/templates/VPN/Network_instance_multivendor.py
@@ -64,10 +64,12 @@ def create_NI(parameters,vendor,DEL):
                 elif "L3VRF" in parameters['type']: 
                     with tag('config'):
                         with tag('name'):text(parameters['name'])
-                        if vendor == "ADVA": 
+                        if "router_id" in parameters: 
+                            with tag('router-id'):text(parameters['router_id'])
+                        if vendor is None or vendor == 'ADVA':
                             with tag('type', 'xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types"'):text('oc-ni-types:',parameters['type'])
                             with tag('route-distinguisher'):text(parameters['route_distinguisher'])
-                    if vendor == "ADVA": 
+                    if vendor is None or vendor == 'ADVA':
                         with tag('encapsulation'):
                             with tag('config'):
                                 with tag('encapsulation-type', 'xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types"')  :text('oc-ni-types:MPLS')
@@ -123,14 +125,29 @@ def add_protocol_NI(parameters,vendor, DEL):
                         with tag('config'):
                             with tag('identifier', 'xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'):text('oc-pol-types:',parameters['identifier'])
                             with tag('name')      :text(parameters['protocol_name'])
+                            with tag('enabled'): text('true')
                         if "BGP" in parameters['identifier']:
                             with tag('bgp'):
+                                with tag('name'): text(parameters['as'])
                                 with tag('global'):
                                     with tag('config'):
                                         with tag('as')       :text(parameters['as'])
-                                        if "router-id" in parameters: 
-                                            with tag('router-id'):text(parameters['router-id'])
-                if vendor == "ADVA": 
+                                        if "router_id" in parameters: 
+                                            with tag('router-id'):text(parameters['router_id'])
+                                if 'neighbors' in parameters:
+                                    with tag('neighbors'):
+                                        for neighbor in parameters['neighbors']:
+                                            with tag('neighbor'):
+                                                with tag('neighbor-address'): text(neighbor['ip_address'])
+                                                with tag('afi-safis'):
+                                                    with tag('afi-safi', 'xmlns:oc-bgp-types="http://openconfig.net/yang/bgp-types"'):
+                                                        with tag('afi-safi-name'): text('oc-bgp-types:IPV4_UNICAST')
+                                                        with tag('enabled'): text('true')
+                                                with tag('config'):
+                                                    with tag('neighbor-address'): text(neighbor['ip_address'])
+                                                    with tag('enabled'): text('true')
+                                                    with tag('peer-as'): text(parameters['as'])
+                if vendor is None or vendor == 'ADVA':
                     with tag('tables'):
                       with tag('table'):
                           with tag('protocol', 'xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'):text('oc-pol-types:',parameters['identifier'])
@@ -177,6 +194,9 @@ def associate_If_to_NI(parameters, DEL):
         else:
             with tag('network-instance'):
                 with tag('name'):text(parameters['name'])
+                with tag('config'):
+                    with tag('name'):text(parameters['name'])
+                    with tag('type', 'xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types"'):text('oc-ni-types:',parameters['type'])
                 with tag('interfaces'):
                     with tag('interface'):
                         with tag('id'):text(parameters['id'])
@@ -315,7 +335,7 @@ def create_table_conns(parameters,DEL):
                     with tag('table-connection','xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="delete"'):
                       with tag('src-protocol','xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'):   text('oc-pol-types:',parameters['src_protocol'])
                       with tag('dst-protocol','xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'):   text('oc-pol-types:',parameters['dst_protocol'])
-                      with tag('address-family', 'xmlns:oc-types="http://openconfig.net/yang/openconfig-types"'):text('oc-types:',parameters['dst_protocol'])     
+                      with tag('address-family', 'xmlns:oc-types="http://openconfig.net/yang/openconfig-types"'):text('oc-types:',parameters['address_family'])     
             else:
                 with tag('table-connections'):
                     with tag('table-connection'):
@@ -326,6 +346,8 @@ def create_table_conns(parameters,DEL):
                         with tag('src-protocol','xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'):   text('oc-pol-types:',parameters['src_protocol'])
                         with tag('dst-protocol','xmlns:oc-pol-types="http://openconfig.net/yang/policy-types"'):   text('oc-pol-types:',parameters['dst_protocol'])
                         with tag('address-family', 'xmlns:oc-types="http://openconfig.net/yang/openconfig-types"'):text('oc-types:',parameters['address_family'])    
+                        # for OCNOS: check if needed
+                        #with tag('dst-instance', 'xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-oc-ni-augments"'):text('65000')
                         if len(parameters['default_import_policy']) != 0:
                             with tag('default-import-policy'):text(parameters['default_import_policy'])
     result = indent(
diff --git a/src/device/service/drivers/openconfig/templates/VPN/Routing_policy.py b/src/device/service/drivers/openconfig/templates/VPN/Routing_policy.py
index 5cc8cc71de9a952eecc8b3df2d71b6d38c496eb9..69fdd2cc52ec179665b6fc5a766b04b0e6c2a6ae 100644
--- a/src/device/service/drivers/openconfig/templates/VPN/Routing_policy.py
+++ b/src/device/service/drivers/openconfig/templates/VPN/Routing_policy.py
@@ -133,14 +133,14 @@ data_2 =    {'ext_community_member'     : '65001:101',
             'ext_community_set_name'    : 'set_srv_101_a'}
 
 print('\nRouting Policy Statement - CREATE\n')
-print(rp_statement(data_1, False))
+print(create_rp_statement(data_1, False))
 print('\nRouting Policy Statement - DELETE\n')
-print(rp_statement(data_1, True))
+print(create_rp_statement(data_1, True))
 
 print('\nRouting Policy Defined Set - CREATE\n')
-print(rp_defined_set(data_2, False))
+print(create_rp_def(data_2, False))
 print('\nRouting Policy Defined Set - DELETE\n')
-print(rp_defined_set(data_2, True))
+print(create_rp_def(data_2, True))
 '''
 
 '''
diff --git a/src/device/service/drivers/openconfig/templates/__init__.py b/src/device/service/drivers/openconfig/templates/__init__.py
index 0c1a057e7c07bebb0e41e295e8d44082bc3ef236..a209d9607c8dca0b5ce09b7b98592a7cdb9b9aaf 100644
--- a/src/device/service/drivers/openconfig/templates/__init__.py
+++ b/src/device/service/drivers/openconfig/templates/__init__.py
@@ -27,6 +27,9 @@ from .NetworkInstances import parse as parse_network_instances
 from .RoutingPolicy import parse as parse_routing_policy
 from .Acl import parse as parse_acl
 from .Inventory import parse as parse_inventory
+from .acl.acl_adapter import acl_cr_to_dict
+from .acl.acl_adapter_ipinfusion_proprietary import acl_cr_to_dict_ipinfusion_proprietary
+
 LOGGER = logging.getLogger(__name__)
 
 ALL_RESOURCE_KEYS = [
@@ -112,16 +115,34 @@ def compose_config( # template generation
             ]
 
     elif (message_renderer == "jinja"):
-        templates =[]
-        template_name = '{:s}/edit_config.xml'.format(RE_REMOVE_FILTERS.sub('', resource_key))
-        templates.append(JINJA_ENV.get_template(template_name))
-
+        templates = []
         if "acl_ruleset" in resource_key:                                               # MANAGING ACLs
-            templates =[]
-            templates.append(JINJA_ENV.get_template('acl/acl-set/acl-entry/edit_config.xml'))
-            templates.append(JINJA_ENV.get_template('acl/interfaces/ingress/edit_config.xml'))
-        data : Dict[str, Any] = json.loads(resource_value)
-        operation = 'delete' if delete else 'merge'
+            if vendor == 'ipinfusion': # ipinfusion proprietary netconf recipe is used temporarily
+                enable_ingress_filter_path = 'acl/interfaces/ingress/enable_ingress_filter.xml'
+                acl_entry_path = 'acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml'
+                acl_ingress_path = 'acl/interfaces/ingress/edit_config_ipinfusion_proprietary.xml'
+                data : Dict[str, Any] = acl_cr_to_dict_ipinfusion_proprietary(resource_value, delete=delete)
+            else:
+                enable_ingress_filter_path = 'acl/interfaces/ingress/enable_ingress_filter.xml'
+                acl_entry_path = 'acl/acl-set/acl-entry/edit_config.xml'
+                acl_ingress_path = 'acl/interfaces/ingress/edit_config.xml'
+                data : Dict[str, Any] = acl_cr_to_dict(resource_value, delete=delete)
+
+            if delete: # unpair acl and interface before removing acl
+                templates.append(JINJA_ENV.get_template(acl_ingress_path))
+                templates.append(JINJA_ENV.get_template(acl_entry_path))
+                templates.append(JINJA_ENV.get_template(enable_ingress_filter_path))
+            else:
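+                # enable the ingress filter, create the ACL entry, then attach it to the interface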
+                templates.append(JINJA_ENV.get_template(enable_ingress_filter_path))
+                templates.append(JINJA_ENV.get_template(acl_entry_path))
+                templates.append(JINJA_ENV.get_template(acl_ingress_path))
+        else:
+            template_name = '{:s}/edit_config.xml'.format(RE_REMOVE_FILTERS.sub('', resource_key))
+            templates.append(JINJA_ENV.get_template(template_name))
+            data : Dict[str, Any] = json.loads(resource_value)
+
+        operation = 'delete' if delete else 'merge' # others
+        #operation = 'delete' if delete else '' # ipinfusion?
 
         return [
             '<config>{:s}</config>'.format(
diff --git a/src/device/service/drivers/openconfig/templates/acl/__init__.py b/src/device/service/drivers/openconfig/templates/acl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..839e45e3b646bc60de7edd81fcfb91b7b38feadf
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/acl/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
\ No newline at end of file
diff --git a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml
new file mode 100644
index 0000000000000000000000000000000000000000..d0210a66c1b5d7de1a4be479cd79e9b48131e2a0
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config_ipinfusion_proprietary.xml
@@ -0,0 +1,34 @@
+<acl xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-acl">
+  <acl-sets>
+    <acl-set {% if operation == 'delete' %}operation="delete"{% endif %}>
+      <name>{{name}}</name>
+      {% if type is defined %}<type>{{type}}</type>{% endif %}
+      <config>
+        <name>{{name}}</name>
+        {% if type is defined %}<type>{{type}}</type>{% endif %}
+      </config>
+      {% if operation != 'delete' %}
+      <acl-entries>
+        <acl-entry>
+          <sequence-id>{{sequence_id}}</sequence-id>
+          <config>
+            <sequence-id>{{sequence_id}}</sequence-id>
+          </config>
+          <ipv4>
+            <config>
+              <source-address>{{source_address}}</source-address>
+              <destination-address>{{destination_address}}</destination-address>
+              <dscp>{{dscp}}</dscp>
+              <protocol-tcp />
+              <tcp-source-port>{{source_port}}</tcp-source-port>
+              <tcp-destination-port>{{destination_port}}</tcp-destination-port>
+              <tcp-flags>{{tcp_flags}}</tcp-flags>
+              <forwarding-action>{{forwarding_action}}</forwarding-action>
+            </config>
+          </ipv4>
+        </acl-entry>
+      </acl-entries>
+      {% endif %}
+    </acl-set>
+  </acl-sets>
+</acl>
\ No newline at end of file
diff --git a/src/device/service/drivers/openconfig/templates/acl/acl_adapter.py b/src/device/service/drivers/openconfig/templates/acl/acl_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..15e723680c355d58b84d0a1677be3f21a0fb95ed
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/acl/acl_adapter.py
@@ -0,0 +1,73 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, TypedDict
+
+from ..ACL.ACL_multivendor import RULE_TYPE_MAPPING, FORWARDING_ACTION_MAPPING, LOG_ACTION_MAPPING
+
+class ACLRequestData(TypedDict):
+    name: str  # acl-set name
+    type: str  # acl-set type
+    sequence_id: int  # acl-entry sequence-id
+    source_address: str
+    destination_address: str
+    forwarding_action: str
+    id: str  # interface id
+    interface: str
+    subinterface: int
+    set_name_ingress: str  # ingress-acl-set name
+    type_ingress: str  # ingress-acl-set type
+    all: bool
+    dscp: int
+    protocol: int
+    tcp_flags: str
+    source_port: int
+    destination_port: int
+
+# NOTE: 'delete' is accepted to match the caller in templates/__init__.py; it is not used here.
+def acl_cr_to_dict(acl_cr_dict: Dict, delete: bool = False, subinterface: int = 0) -> Dict:
+    rule_set = acl_cr_dict['rule_set']
+    rule_set_entry = rule_set['entries'][0]
+    rule_set_entry_match = rule_set_entry['match']
+    rule_set_entry_action = rule_set_entry['action']
+
+    name: str = rule_set['name']
+    type: str = RULE_TYPE_MAPPING[rule_set["type"]]
+    sequence_id = rule_set_entry['sequence_id']
+    source_address = rule_set_entry_match['src_address']
+    destination_address = rule_set_entry_match['dst_address']
+    forwarding_action: str = FORWARDING_ACTION_MAPPING[rule_set_entry_action['forward_action']]
+    interface_id = acl_cr_dict['interface']
+    interface = interface_id
+    set_name_ingress = name
+    type_ingress = type
+
+    return ACLRequestData(
+        name=name,
+        type=type,
+        sequence_id=sequence_id,
+        source_address=source_address,
+        destination_address=destination_address,
+        forwarding_action=forwarding_action,
+        id=interface_id,
+        interface=interface,
+        # subinterface=subinterface,
+        set_name_ingress=set_name_ingress,
+        type_ingress=type_ingress,
+        all=True,
+        dscp=18,
+        protocol=6,
+        tcp_flags='TCP_SYN',
+        source_port=22,
+        destination_port=80
+    )
diff --git a/src/device/service/drivers/openconfig/templates/acl/acl_adapter_ipinfusion_proprietary.py b/src/device/service/drivers/openconfig/templates/acl/acl_adapter_ipinfusion_proprietary.py
new file mode 100644
index 0000000000000000000000000000000000000000..52213c2aba9a128ace4e927a5f01f9be278442b6
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/acl/acl_adapter_ipinfusion_proprietary.py
@@ -0,0 +1,63 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, TypedDict
+
+
+RULE_TYPE_MAPPING = {
+    'ACLRULETYPE_IPV4'     : 'ip',
+}
+
+FORWARDING_ACTION_MAPPING = {
+    'ACLFORWARDINGACTION_DROP'     : 'deny',
+    'ACLFORWARDINGACTION_ACCEPT'   : 'permit',
+}
+
+class ACLRequestData(TypedDict):
+    name: str  # acl-set name
+    type: str  # acl-set type
+    sequence_id: int  # acl-entry sequence-id
+    source_address: str
+    destination_address: str
+    forwarding_action: str
+    interface: str
+    dscp: int
+    tcp_flags: str
+    source_port: int
+    destination_port: int
+
+def acl_cr_to_dict_ipinfusion_proprietary(acl_cr_dict: Dict, delete: bool = False) -> Dict:
+    rule_set = acl_cr_dict['rule_set']
+    name: str = rule_set['name']
+    type: str = RULE_TYPE_MAPPING[rule_set["type"]]
+    interface = acl_cr_dict['interface'][5:] # remove preceding `PORT-` characters
+    if delete:
+        return ACLRequestData(name=name, type=type, interface=interface)
+    rule_set_entry = rule_set['entries'][0]
+    rule_set_entry_match = rule_set_entry['match']
+    rule_set_entry_action = rule_set_entry['action']
+
+    return ACLRequestData(
+        name=name,
+        type=type,
+        sequence_id=rule_set_entry['sequence_id'],
+        source_address=rule_set_entry_match['src_address'],
+        destination_address=rule_set_entry_match['dst_address'],
+        forwarding_action=FORWARDING_ACTION_MAPPING[rule_set_entry_action['forward_action']],
+        interface=interface,
+        dscp=rule_set_entry_match["dscp"],
+        tcp_flags=rule_set_entry_match["flags"],
+        source_port=rule_set_entry_match['src_port'],
+        destination_port=rule_set_entry_match['dst_port']
+    )
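+
+# Illustrative sketch of the expected input shape (hypothetical values, derived from the
+# fields accessed above):
+#
+#   acl_cr_to_dict_ipinfusion_proprietary({
+#       'interface': 'PORT-xe5',
+#       'rule_set': {
+#           'name': 'acl_example', 'type': 'ACLRULETYPE_IPV4',
+#           'entries': [{
+#               'sequence_id': 10,
+#               'match': {'src_address': '192.168.10.0/24', 'dst_address': '192.168.20.0/24',
+#                         'dscp': 18, 'flags': 'TCP_SYN', 'src_port': 22, 'dst_port': 80},
+#               'action': {'forward_action': 'ACLFORWARDINGACTION_ACCEPT'},
+#           }],
+#       },
+#   }, delete=False)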
diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config_ipinfusion_proprietary.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config_ipinfusion_proprietary.xml
new file mode 100644
index 0000000000000000000000000000000000000000..6e502154f16a7a9d4ce0afc0c49ab96b3a2bd979
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config_ipinfusion_proprietary.xml
@@ -0,0 +1,26 @@
+<acl xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-acl">
+  <interfaces>
+    <interface>
+      <name>{{interface}}</name>
+      <config>
+        <name>{{interface}}</name>
+      </config>
+      <ingress-acl-sets>
+        <ingress-acl-set {% if operation == "delete" %}operation="delete"{% endif %}>
+          {% if type is defined %}<acl-type>{{type}}</acl-type>{% endif %}
+          <access-groups>
+            <access-group>
+              <acl-name>{{name}}</acl-name>
+              <config>
+                <acl-name>{{name}}</acl-name>
+              </config>
+            </access-group>
+          </access-groups>
+          <config>
+            {% if type is defined %}<acl-type>{{type}}</acl-type>{% endif %}
+          </config>
+        </ingress-acl-set>
+      </ingress-acl-sets>
+    </interface>
+  </interfaces>
+</acl>
\ No newline at end of file
diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/enable_ingress_filter.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/enable_ingress_filter.xml
new file mode 100644
index 0000000000000000000000000000000000000000..274028657547dd31d20654e2a59ac11554cb01d5
--- /dev/null
+++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/enable_ingress_filter.xml
@@ -0,0 +1,9 @@
+<profiles xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-platform">
+  <hardware-profile>
+    <filters>
+      <config>
+        <ingress-ipv4-extended {% if operation == "delete" %}operation="delete"{% endif %}></ingress-ipv4-extended>
+      </config>
+    </filters>
+  </hardware-profile>
+</profiles>
\ No newline at end of file
diff --git a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml
index e441004006e4cdd445f1d0244a9582b57956af40..2d8d3ee07b3a8df20a4b51be755e18b7aec982de 100644
--- a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml
@@ -1,5 +1,4 @@
-<interfaces xmlns="http://openconfig.net/yang/interfaces" 
-            xmlns:oc-ip="http://openconfig.net/yang/interfaces/ip" >
+<interfaces xmlns="http://openconfig.net/yang/interfaces">
     <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
         <name>{{name}}</name>
         {% if operation is defined and operation != 'delete' %}
@@ -31,17 +30,20 @@
                 </vlan>
                 {% endif %}
                 {% if address_ip is defined %}
-                <oc-ip:ipv4>
-                    <oc-ip:addresses>
-                        <oc-ip:address>
-                            <oc-ip:ip>{{address_ip}}</oc-ip:ip>
-                            <oc-ip:config>
-                                <oc-ip:ip>{{address_ip}}</oc-ip:ip>
-                                <oc-ip:prefix-length>{{address_prefix}}</oc-ip:prefix-length>
-                            </oc-ip:config>
-                        </oc-ip:address>
-                    </oc-ip:addresses>
-                </oc-ip:ipv4>
+                <ipv4 xmlns="http://openconfig.net/yang/interfaces/ip">
+                    <config>
+                        {% if mtu is defined %}<mtu>{{mtu}}</mtu>{% endif%}
+                    </config>
+                    <addresses>
+                        <address>
+                            <ip>{{address_ip}}</ip>
+                            <config>
+                                <ip>{{address_ip}}</ip>
+                                <prefix-length>{{address_prefix}}</prefix-length>
+                            </config>
+                        </address>
+                    </addresses>
+                </ipv4>
                 {% endif %}
             </subinterface>
         </subinterfaces>
diff --git a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml
index 855f321b4a69ba1e660487c108a05d0ec4b5d475..e926796d039d54e30f6ba13eb5eb66bcec079c08 100644
--- a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml
@@ -1,6 +1,10 @@
 <network-instances xmlns="http://openconfig.net/yang/network-instance">
     <network-instance>
         <name>{{name}}</name>
+        <config>
+            <name>{{name}}</name>
+            <type xmlns:oc-ni-types="http://openconfig.net/yang/network-instance-types">oc-ni-types:{{type}}</type>
+        </config>
         <interfaces>
             <interface{% if operation is defined %} xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" nc:operation="{{operation}}"{% endif %}>
                 <id>{{id}}</id>
diff --git a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml
index c9c068e480c0569cfe5f97b78b28fbe03e2595f8..da66d97f053f509a1a595cdb1abc0bd1791ad0bc 100644
--- a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml
@@ -9,15 +9,37 @@
                 <config>
                     <identifier xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{identifier}}</identifier>
                     <name>{{protocol_name}}</name>
+                    <enabled>true</enabled>
                 </config>
                 {% if identifier=='BGP' %}
                 <bgp>
+                    <name>{{as}}</name>
                     <global>
                         <config>
                             <as>{{as}}</as>
                             <router-id>{{router_id}}</router-id>
                         </config>
                     </global>
+                    {% if neighbors is defined %}
+                    <neighbors>
+                        {% for neighbor in neighbors %}
+                        <neighbor>
+                            <neighbor-address>{{neighbor['ip_address']}}</neighbor-address>
+                            <afi-safis>
+                                <afi-safi xmlns:oc-bgp-types="http://openconfig.net/yang/bgp-types">
+                                    <afi-safi-name>oc-bgp-types:IPV4_UNICAST</afi-safi-name>
+                                    <enabled>true</enabled>
+                                </afi-safi>
+                            </afi-safis>
+                            <config>
+                                <neighbor-address>{{neighbor['ip_address']}}</neighbor-address>
+                                <enabled>true</enabled>
+                                <peer-as>{{as}}</peer-as>
+                            </config>
+                        </neighbor>
+                        {% endfor %}
+                    </neighbors>
+                    {% endif %}
                 </bgp>
                 {% endif %}
                 {% endif %}
diff --git a/src/device/service/drivers/openconfig/templates/network_instance/table_connections/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/table_connections/edit_config.xml
index 46bf5e387789c7efc800ad96ed759748273bed34..35c535c6bd3f78e30fc2177ecc722b1115f54fc5 100644
--- a/src/device/service/drivers/openconfig/templates/network_instance/table_connections/edit_config.xml
+++ b/src/device/service/drivers/openconfig/templates/network_instance/table_connections/edit_config.xml
@@ -11,6 +11,9 @@
                     <src-protocol xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{src_protocol}}</src-protocol>
                     <dst-protocol xmlns:oc-pol-types="http://openconfig.net/yang/policy-types">oc-pol-types:{{dst_protocol}}</dst-protocol>
                     <address-family xmlns:oc-types="http://openconfig.net/yang/openconfig-types">oc-types:{{address_family}}</address-family>
+                    {% if False %}
+                        <dst-instance xmlns="http://www.ipinfusion.com/yang/ocnos/ipi-oc-ni-augments">{{as}}</dst-instance>
+                    {% endif %}
                     {% if default_import_policy is defined %}<default-import-policy>{{default_import_policy}}</default-import-policy>{% endif %}
                 </config>
                 {% endif %}
diff --git a/src/device/tests/test_unitary_openconfig_ocnos.py b/src/device/tests/test_unitary_openconfig_ocnos.py
new file mode 100644
index 0000000000000000000000000000000000000000..87d951581ad98147f8dd565af616fe034a346693
--- /dev/null
+++ b/src/device/tests/test_unitary_openconfig_ocnos.py
@@ -0,0 +1,210 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, os, pytest, time
+from typing import Dict, Tuple
+os.environ['DEVICE_EMULATED_ONLY'] = 'YES'
+
+# pylint: disable=wrong-import-position
+from device.service.drivers.openconfig.OpenConfigDriver import OpenConfigDriver
+#from device.service.driver_api._Driver import (
+#    RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES
+#)
+
+logging.basicConfig(level=logging.DEBUG)
+#logging.getLogger('ncclient.operations.rpc').setLevel(logging.INFO)
+#logging.getLogger('ncclient.transport.parser').setLevel(logging.INFO)
+
+LOGGER = logging.getLogger(__name__)
+
+
+##### DRIVERS FIXTURE ##################################################################################################
+
+DEVICES = {
+    'CSGW1': {'address': '10.1.1.86', 'port': 830, 'settings': {
+        'username': 'ocnos', 'password': 'ocnos',
+        'vendor': None, 'force_running': False, 'hostkey_verify': False, 'look_for_keys': False, 'allow_agent': False,
+        'commit_per_rule': True, 'device_params': {'name': 'default'}, 'manager_params': {'timeout' : 120}
+    }},
+    'CSGW2': {'address': '10.1.1.87', 'port': 830, 'settings': {
+        'username': 'ocnos', 'password': 'ocnos',
+        'vendor': None, 'force_running': False, 'hostkey_verify': False, 'look_for_keys': False, 'allow_agent': False,
+        'commit_per_rule': True, 'device_params': {'name': 'default'}, 'manager_params': {'timeout' : 120}
+    }},
+}
+
+@pytest.fixture(scope='session')
+def drivers() -> Dict[str, OpenConfigDriver]:
+    _drivers : Dict[str, OpenConfigDriver] = dict()
+    for device_name, driver_params in DEVICES.items():
+        driver = OpenConfigDriver(driver_params['address'], driver_params['port'], **(driver_params['settings']))
+        driver.Connect()
+        _drivers[device_name] = driver
+    yield _drivers
+    time.sleep(1)
+    for _,driver in _drivers.items():
+        driver.Disconnect()
+
+
+def network_instance(ni_name, ni_type, ni_router_id=None, ni_route_distinguisher=None) -> Tuple[str, str]:
+    path = '/network_instance[{:s}]'.format(ni_name)
+    data = {'name': ni_name, 'type': ni_type}
+    if ni_router_id is not None: data['router_id'] = ni_router_id
+    if ni_route_distinguisher is not None: data['route_distinguisher'] = ni_route_distinguisher
+    return path, json.dumps(data)
+
+def network_instance_add_protocol_bgp(ni_name, ni_type, ni_router_id, ni_bgp_as, neighbors=[]) -> Tuple[str, str]:
+    path = '/network_instance[{:s}]/protocols[BGP]'.format(ni_name)
+    data = {
+        'name': ni_name, 'type': ni_type, 'router_id': ni_router_id, 'identifier': 'BGP',
+        'protocol_name': ni_bgp_as, 'as': ni_bgp_as
+    }
+    if len(neighbors) > 0:
+        data['neighbors'] = [
+            {'ip_address': neighbor_ip_address, 'remote_as': neighbor_remote_as}
+            for neighbor_ip_address, neighbor_remote_as in neighbors
+        ]
+    return path, json.dumps(data)
+
+def network_instance_add_protocol_direct(ni_name, ni_type) -> Tuple[str, str]:
+    path = '/network_instance[{:s}]/protocols[DIRECTLY_CONNECTED]'.format(ni_name)
+    data = {
+        'name': ni_name, 'type': ni_type, 'identifier': 'DIRECTLY_CONNECTED',
+        'protocol_name': 'DIRECTLY_CONNECTED'
+    }
+    return path, json.dumps(data)
+
+def network_instance_add_protocol_static(ni_name, ni_type) -> Tuple[str, str]:
+    path = '/network_instance[{:s}]/protocols[STATIC]'.format(ni_name)
+    data = {
+        'name': ni_name, 'type': ni_type, 'identifier': 'STATIC',
+        'protocol_name': 'STATIC'
+    }
+    return path, json.dumps(data)
+
+#def network_instance_static_route(ni_name, prefix, next_hop, next_hop_index=0) -> Tuple[str, Dict]:
+#    path = '/network_instance[{:s}]/static_route[{:s}]'.format(ni_name, prefix)
+#    data = {'name': ni_name, 'prefix': prefix, 'next_hop': next_hop, 'next_hop_index': next_hop_index}
+#    return path, json.dumps(data)
+
+def network_instance_add_table_connection(
+    ni_name, src_protocol, dst_protocol, address_family, default_import_policy, bgp_as=None
+) -> Tuple[str, str]:
+    path = '/network_instance[{:s}]/table_connections[{:s}][{:s}][{:s}]'.format(
+        ni_name, src_protocol, dst_protocol, address_family
+    )
+    data = {
+        'name': ni_name, 'src_protocol': src_protocol, 'dst_protocol': dst_protocol,
+        'address_family': address_family, 'default_import_policy': default_import_policy,
+    }
+    if bgp_as is not None: data['as'] = bgp_as
+    return path, json.dumps(data)
+
+def interface(
+    name, index, description=None, if_type=None, vlan_id=None, mtu=None, ipv4_address_prefix=None, enabled=None
+) -> Tuple[str, str]:
+    path = '/interface[{:s}]/subinterface[{:d}]'.format(name, index)
+    data = {'name': name, 'index': index}
+    if description is not None: data['description'] = description
+    if if_type     is not None: data['type'       ] = if_type
+    if vlan_id     is not None: data['vlan_id'    ] = vlan_id
+    if mtu         is not None: data['mtu'        ] = mtu
+    if enabled     is not None: data['enabled'    ] = enabled
+    if ipv4_address_prefix is not None:
+        ipv4_address, ipv4_prefix = ipv4_address_prefix
+        data['address_ip'    ] = ipv4_address
+        data['address_prefix'] = ipv4_prefix
+    return path, json.dumps(data)
+
+def network_instance_interface(ni_name, ni_type, if_name, if_index) -> Tuple[str, str]:
+    path = '/network_instance[{:s}]/interface[{:s}.{:d}]'.format(ni_name, if_name, if_index)
+    data = {'name': ni_name, 'type': ni_type, 'id': if_name, 'interface': if_name, 'subinterface': if_index}
+    return path, json.dumps(data)
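+
+# Illustrative output of one helper, using values from the test below:
+#   network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0) ->
+#     ('/network_instance[ecoc24]/interface[ce1.0]',
+#      '{"name": "ecoc24", "type": "L3VRF", "id": "ce1", "interface": "ce1", "subinterface": 0}')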
+
+def test_configure(drivers : Dict[str, OpenConfigDriver]):
+    #resources_to_get = []
+    #resources_to_get = [RESOURCE_ENDPOINTS]
+    #resources_to_get = [RESOURCE_INTERFACES]
+    #resources_to_get = [RESOURCE_NETWORK_INSTANCES]
+    #resources_to_get = [RESOURCE_ROUTING_POLICIES]
+    #resources_to_get = [RESOURCE_SERVICES]
+    #LOGGER.info('resources_to_get = {:s}'.format(str(resources_to_get)))
+    #results_getconfig = driver.GetConfig(resources_to_get)
+    #LOGGER.info('results_getconfig = {:s}'.format(str(results_getconfig)))
+
+    csgw1_resources_to_set = [
+        network_instance('ecoc24', 'L3VRF', '192.168.150.1', '65001:1'),
+        network_instance_add_protocol_direct('ecoc24', 'L3VRF'),
+        network_instance_add_protocol_static('ecoc24', 'L3VRF'),
+        network_instance_add_protocol_bgp('ecoc24', 'L3VRF', '192.168.150.1', '65001', neighbors=[
+            ('192.168.150.2', '65001')
+        ]),
+        network_instance_add_table_connection('ecoc24', 'DIRECTLY_CONNECTED', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'),
+        network_instance_add_table_connection('ecoc24', 'STATIC', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'),
+    
+        interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500),
+        network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0),
+        interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.10.1', 24), enabled=True),
+    
+        interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500),
+        network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0),
+        interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.150.1', 24), enabled=True),
+    ]
+    LOGGER.info('CSGW1 resources_to_set = {:s}'.format(str(csgw1_resources_to_set)))
+    results_setconfig = drivers['CSGW1'].SetConfig(csgw1_resources_to_set)
+    LOGGER.info('CSGW1 results_setconfig = {:s}'.format(str(results_setconfig)))
+
+    csgw2_resources_to_set = [
+        network_instance('ecoc24', 'L3VRF', '192.168.150.2', '65001:1'),
+        network_instance_add_protocol_direct('ecoc24', 'L3VRF'),
+        network_instance_add_protocol_static('ecoc24', 'L3VRF'),
+        network_instance_add_protocol_bgp('ecoc24', 'L3VRF', '192.168.150.2', '65001', neighbors=[
+            ('192.168.150.1', '65001')
+        ]),
+        network_instance_add_table_connection('ecoc24', 'DIRECTLY_CONNECTED', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'),
+        network_instance_add_table_connection('ecoc24', 'STATIC', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'),
+    
+        interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500),
+        network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0),
+        interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.20.1', 24), enabled=True),
+    
+        interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500),
+        network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0),
+        interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.150.2', 24), enabled=True),
+    ]
+    LOGGER.info('CSGW2 resources_to_set = {:s}'.format(str(csgw2_resources_to_set)))
+    results_setconfig = drivers['CSGW2'].SetConfig(csgw2_resources_to_set)
+    LOGGER.info('CSGW2 results_setconfig = {:s}'.format(str(results_setconfig)))
+
+    csgw1_resources_to_delete = [
+        network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0),
+        network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0),
+        #interface('ce1', 0),
+        #interface('xe5', 0),
+        network_instance('ecoc24', 'L3VRF'),
+    ]
+    LOGGER.info('CSGW1 resources_to_delete = {:s}'.format(str(csgw1_resources_to_delete)))
+    results_deleteconfig = drivers['CSGW1'].DeleteConfig(csgw1_resources_to_delete)
+    LOGGER.info('CSGW1 results_deleteconfig = {:s}'.format(str(results_deleteconfig)))
+
+    csgw2_resources_to_delete = [
+        network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0),
+        network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0),
+        #interface('ce1', 0),
+        #interface('xe5', 0),
+        network_instance('ecoc24', 'L3VRF'),
+    ]
+    LOGGER.info('CSGW2 resources_to_delete = {:s}'.format(str(csgw2_resources_to_delete)))
+    results_deleteconfig = drivers['CSGW2'].DeleteConfig(csgw2_resources_to_delete)
+    LOGGER.info('CSGW2 results_deleteconfig = {:s}'.format(str(results_deleteconfig)))
diff --git a/src/dlt/gateway/legacy/settings.gradle.kts b/src/dlt/gateway/legacy/settings.gradle.kts
index 77fa0f0b22918cf306f0e5f07506a35e492142b4..6500a488a10c31fba79da633993989e5a7e7ec40 100644
--- a/src/dlt/gateway/legacy/settings.gradle.kts
+++ b/src/dlt/gateway/legacy/settings.gradle.kts
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+ * Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/src/forecaster/requirements.in b/src/forecaster/requirements.in
index 6caa5d616f7b7efc525eb5d79a607b4005d0c4ac..9a31513799fd6aa5d915fb6c83a516176f290ce9 100644
--- a/src/forecaster/requirements.in
+++ b/src/forecaster/requirements.in
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-#numpy==1.23.*
+numpy<2.0.0
 pandas==1.5.*
 #prophet==1.1.*
 scikit-learn==1.1.*
diff --git a/src/kpi_manager/.gitlab-ci.yml b/src/kpi_manager/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..498cfd89fb3da85fec1b2ad0c930408eab215dc5
--- /dev/null
+++ b/src/kpi_manager/.gitlab-ci.yml
@@ -0,0 +1,131 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build kpi-manager:
+  variables:
+    IMAGE_NAME: 'kpi_manager' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
+    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit_test kpi-manager:
+  variables:
+    IMAGE_NAME: 'kpi_manager' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build kpi-manager
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
+    - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
+    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+    - docker container prune -f
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker pull "cockroachdb/cockroach:latest-v22.2"
+    - docker volume create crdb
+    - >
+      docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080
+      --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123
+      --volume "crdb:/cockroach/cockroach-data"
+      cockroachdb/cockroach:latest-v22.2 start-single-node
+    - echo "Waiting for initialization..."
+    - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
+    - docker logs crdb
+    - docker ps -a
+    - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $CRDB_ADDRESS
+    - >
+      docker run --name $IMAGE_NAME -d -p 30010:30010
+      --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
+      --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - docker ps -a
+    - sleep 5
+    - docker logs $IMAGE_NAME
+    - >
+      docker exec -i $IMAGE_NAME bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py"
+    - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker volume rm -f crdb
+    - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - src/$IMAGE_NAME/tests/Dockerfile
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
+
+## Deployment of the service in Kubernetes Cluster
+#deploy context:
+#  variables:
+#    IMAGE_NAME: 'context' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit test context
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual    
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/kpi_manager/Dockerfile b/src/kpi_manager/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..a57957759a32b45b715e327b54ebe004a6edf265
--- /dev/null
+++ b/src/kpi_manager/Dockerfile
@@ -0,0 +1,68 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur (any non-empty value enables unbuffered output)
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/kpi_manager
+WORKDIR /var/teraflow/kpi_manager
+COPY src/kpi_manager/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/kpi_manager/. kpi_manager/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "kpi_manager.service"]
diff --git a/src/kpi_manager/README.md b/src/kpi_manager/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c1feadcc4843db26a219d1e3b37833ddd80b18dc
--- /dev/null
+++ b/src/kpi_manager/README.md
@@ -0,0 +1,29 @@
+# How to locally run and test KPI manager micro-service
+
+## --- File links need to be updated. ---
+### Prerequisites
+The following requirements should be fulfilled before executing the KPI management service.
+
+1. Verify that the [kpi_management.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/kpi_management.proto) file exists and that the gRPC files are generated successfully.
+2. A virtual environment exists with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/requirements.in) installed successfully.
+3. Verify the creation of the required database and tables. The
+[KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/database/tests/KpiDBtests.py) Python file lists the functions to create the database and tables, and
+[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/service/database/KpiEngine.py) contains the DB connection string; update the string as per your deployment.
+
+### Message format templates
+The ["Messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_messages.py) Python file lists the basic gRPC message formats used during testing.
+
+### Test file
+The ["KPI management test"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_kpi_manager.py) Python file lists the different tests conducted during the experiment.
+
+### Flow of execution (KPI Manager service functions)
+1. Call the `create_database()` and `create_tables()` functions from the `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables()` to verify the existence of the KPI table.
+
+2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add a `KpiDescriptor` to the `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
+
+3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read a `KpiDescriptor` from the DB and `DeleteKpiDescriptor(KpiId)` to delete a `KpiDescriptor` from the DB.
+
+4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that match the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types. A minimal client sketch illustrating this flow is shown below.
+
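+The following is an illustrative sketch only, assuming the service is reachable through its default host/port settings and that the client and proto modules are importable as shown; it is not part of the test suite:
+
+```python
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+from common.proto.kpi_manager_pb2 import KpiDescriptor
+
+client = KpiManagerClient()                   # resolves host/port from the service settings
+
+descriptor = KpiDescriptor()
+descriptor.kpi_description = 'example KPI'    # hypothetical value, for illustration only
+
+kpi_id = client.SetKpiDescriptor(descriptor)  # returns the assigned KpiId
+read_back = client.GetKpiDescriptor(kpi_id)   # reads the KpiDescriptor back from the DB
+client.DeleteKpiDescriptor(kpi_id)            # deletes it again
+client.close()
+```
+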
+## For KPI composer and KPI writer
+The functionalities of the KPI composer and writer are heavily dependent upon the Telemetry service. Therefore, these services have additional prerequisites that are mentioned [here](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/requirements.in).
diff --git a/src/kpi_manager/__init__.py b/src/kpi_manager/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_manager/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_manager/client/KpiManagerClient.py b/src/kpi_manager/client/KpiManagerClient.py
new file mode 100755
index 0000000000000000000000000000000000000000..672d82f2d78ea8b477429c5ba03fbb4331bae7c7
--- /dev/null
+++ b/src/kpi_manager/client/KpiManagerClient.py
@@ -0,0 +1,77 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+
+from common.proto.context_pb2 import Empty
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.proto.kpi_manager_pb2_grpc import KpiManagerServiceStub
+from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 10
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class KpiManagerClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.KPIMANAGER) 
+        if not port: port = get_service_port_grpc(ServiceNameEnum.KPIMANAGER) 
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
+
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = KpiManagerServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def SetKpiDescriptor(self, request : KpiDescriptor) -> KpiId:
+        LOGGER.debug('SetKpiDescriptor: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SetKpiDescriptor(request)
+        LOGGER.debug('SetKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def DeleteKpiDescriptor(self,request : KpiId) -> Empty:
+        LOGGER.debug('DeleteKpiDescriptor: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.DeleteKpiDescriptor(request)
+        LOGGER.debug('DeleteKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def GetKpiDescriptor(self, request : KpiId) -> KpiDescriptor:
+        LOGGER.debug('GetKpiDescriptor: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetKpiDescriptor(request)
+        LOGGER.debug('GetKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def SelectKpiDescriptor(self, filter : KpiDescriptorFilter) -> KpiDescriptorList:
+        LOGGER.debug('SelectKpiDescriptor: {:s}'.format(grpc_message_to_json_string(filter)))
+        response = self.stub.SelectKpiDescriptor(filter)
+        LOGGER.debug('SelectKpiDescriptor result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
diff --git a/src/kpi_manager/client/__init__.py b/src/kpi_manager/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..48f7d354a2f3fe6e91bb79b3ca956f68c36ed9e3
--- /dev/null
+++ b/src/kpi_manager/client/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+# 
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_manager/database/KpiEngine.py b/src/kpi_manager/database/KpiEngine.py
new file mode 100644
index 0000000000000000000000000000000000000000..dff406de666b5f68539b8897fa26e0b3ad51286b
--- /dev/null
+++ b/src/kpi_manager/database/KpiEngine.py
@@ -0,0 +1,44 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, sqlalchemy
+from common.Settings import get_setting
+
+LOGGER = logging.getLogger(__name__)
+
+# CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}'
+CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
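+# Illustrative resolved URI (values below are examples only):
+#   cockroachdb://tfs:tfs123@cockroachdb-public.crdb.svc.cluster.local:26257/tfs_kpi_mgmt?sslmode=require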
+
+class KpiEngine:
+    @staticmethod
+    def get_engine() -> sqlalchemy.engine.Engine:
+        crdb_uri = get_setting('CRDB_URI', default=None)
+        if crdb_uri is None:
+            CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE')
+            CRDB_SQL_PORT  = get_setting('CRDB_SQL_PORT')
+            CRDB_DATABASE  = 'tfs_kpi_mgmt'             # TODO: define variable get_setting('CRDB_DATABASE_KPI_MGMT')
+            CRDB_USERNAME  = get_setting('CRDB_USERNAME')
+            CRDB_PASSWORD  = get_setting('CRDB_PASSWORD')
+            CRDB_SSLMODE   = get_setting('CRDB_SSLMODE')
+            crdb_uri = CRDB_URI_TEMPLATE.format(
+                CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+        # crdb_uri = CRDB_URI_TEMPLATE.format(
+        #         CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+        try:
+            engine = sqlalchemy.create_engine(crdb_uri, echo=False)
+            LOGGER.info('KpiDBmanager initialized with DB URL: {:}'.format(crdb_uri))
+        except: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
+            return None # type: ignore
+        return engine 
diff --git a/src/kpi_manager/database/KpiModel.py b/src/kpi_manager/database/KpiModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c2fdff0664883bcc727096ddeda562fdbe3085d
--- /dev/null
+++ b/src/kpi_manager/database/KpiModel.py
@@ -0,0 +1,84 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy import Column, Integer, String, Text
+from sqlalchemy.orm import registry
+from common.proto.kpi_manager_pb2 import KpiDescriptor
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+# Create a base class for declarative models
+Base = registry().generate_base()
+
+class Kpi(Base):
+    __tablename__ = 'kpi'
+
+    kpi_id          = Column(UUID(as_uuid=False), primary_key=True)
+    kpi_description = Column(Text               , nullable=False)
+    kpi_sample_type = Column(Integer            , nullable=False)
+    device_id       = Column(String             , nullable=False)
+    endpoint_id     = Column(String             , nullable=False)
+    service_id      = Column(String             , nullable=False)
+    slice_id        = Column(String             , nullable=False)
+    connection_id   = Column(String             , nullable=False)
+    link_id         = Column(String             , nullable=False)
+
+    # helps in logging the information
+    def __repr__(self):
+        return (f"<Kpi(kpi_id='{self.kpi_id}', kpi_description='{self.kpi_description}', "
+                f"kpi_sample_type='{self.kpi_sample_type}', device_id='{self.device_id}', "
+                f"endpoint_id='{self.endpoint_id}', service_id='{self.service_id}', "
+                f"slice_id='{self.slice_id}', connection_id='{self.connection_id}', "
+                f"link_id='{self.link_id}')>")
+
+    @classmethod
+    def convert_KpiDescriptor_to_row(cls, request):
+        """
+        Create an instance of Kpi from a request object.
+        Args:    request: The request object containing the data.
+        Returns: An instance of Kpi initialized with data from the request.
+        """
+        return cls(
+            kpi_id          = request.kpi_id.kpi_id.uuid,
+            kpi_description = request.kpi_description,
+            kpi_sample_type = request.kpi_sample_type,
+            device_id       = request.device_id.device_uuid.uuid,
+            endpoint_id     = request.endpoint_id.endpoint_uuid.uuid,
+            service_id      = request.service_id.service_uuid.uuid,
+            slice_id        = request.slice_id.slice_uuid.uuid,
+            connection_id   = request.connection_id.connection_uuid.uuid,
+            link_id         = request.link_id.link_uuid.uuid
+        )
+    
+    @classmethod
+    def convert_row_to_KpiDescriptor(cls, row):
+        """
+        Create and return a dictionary representation of a Kpi instance.       
+        Args:   row: The Kpi instance (row) containing the data.
+        Returns: KpiDescriptor object
+        """
+        response = KpiDescriptor()
+        response.kpi_id.kpi_id.uuid                 = row.kpi_id
+        response.kpi_description                    = row.kpi_description
+        response.kpi_sample_type                    = row.kpi_sample_type
+        response.service_id.service_uuid.uuid       = row.service_id
+        response.device_id.device_uuid.uuid         = row.device_id
+        response.slice_id.slice_uuid.uuid           = row.slice_id
+        response.endpoint_id.endpoint_uuid.uuid     = row.endpoint_id
+        response.connection_id.connection_uuid.uuid = row.connection_id
+        response.link_id.link_uuid.uuid             = row.link_id
+        return response
diff --git a/src/kpi_manager/database/Kpi_DB.py b/src/kpi_manager/database/Kpi_DB.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b60640707c8d0c2ce90e5ab135ddf6fd4c91f63
--- /dev/null
+++ b/src/kpi_manager/database/Kpi_DB.py
@@ -0,0 +1,155 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sqlalchemy_utils
+from sqlalchemy.orm import sessionmaker
+from kpi_manager.database.KpiEngine import KpiEngine
+from kpi_manager.database.KpiModel import Kpi as KpiModel
+from common.method_wrappers.ServiceExceptions import ( 
+    AlreadyExistsException, OperationFailedException , NotFoundException)
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "tfs_kpi_mgmt"
+
+class KpiDB:
+    def __init__(self):
+        self.db_engine = KpiEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            raise Exception('Unable to get SQLAlchemy DB Engine')
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
+    def create_database(self) -> None:
+        if not sqlalchemy_utils.database_exists(self.db_engine.url):
+            LOGGER.debug("Database created. {:}".format(self.db_engine.url))
+            sqlalchemy_utils.create_database(self.db_engine.url)
+
+    def drop_database(self) -> None:
+        if sqlalchemy_utils.database_exists(self.db_engine.url):
+            sqlalchemy_utils.drop_database(self.db_engine.url)
+
+    def create_tables(self):
+        try:
+            KpiModel.metadata.create_all(self.db_engine)     # type: ignore
+            LOGGER.debug("Tables created in the DB Name: {:}".format(self.db_name))
+        except Exception as e:
+            LOGGER.debug("Tables cannot be created in the kpi database. {:s}".format(str(e)))
+            raise OperationFailedException ("Tables can't be created", extra_details=["unable to create table {:}".format(e)])
+
+    def verify_tables(self):
+        try:
+            with self.db_engine.connect() as connection:
+                result = connection.execute("SHOW TABLES;")
+                tables = result.fetchall()      # type: ignore
+                LOGGER.debug("Tables verified: {:}".format(tables))
+        except Exception as e:
+            LOGGER.debug("Unable to fetch Table names. {:s}".format(str(e)))
+
+    def add_row_to_db(self, row):
+        session = self.Session()
+        try:
+            session.add(row)
+            session.commit()
+            LOGGER.debug(f"Row inserted into {row.__class__.__name__} table.")
+            return True
+        except Exception as e:
+            session.rollback()
+            if "psycopg2.errors.UniqueViolation" in str(e):
+                LOGGER.error(f"Unique key voilation: {row.__class__.__name__} table. {str(e)}")
+                raise AlreadyExistsException(row.__class__.__name__, row,
+                                             extra_details=["Unique key voilation: {:}".format(e)] )
+            else:
+                LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
+                raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
+        finally:
+            session.close()
+    
+    def search_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if entity:
+                # LOGGER.debug(f"{model.__name__} ID found: {str(entity)}")
+                return entity
+            else:
+                LOGGER.debug(f"{model.__name__} ID not found, No matching row: {str(id_to_search)}")
+                print("{:} ID not found, No matching row: {:}".format(model.__name__, id_to_search))
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.debug(f"Failed to retrieve {model.__name__} ID. {str(e)}")
+            raise OperationFailedException ("search by column id", extra_details=["unable to search row {:}".format(e)])
+        finally:
+            session.close()
+    
+    def delete_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            record = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if record:
+                session.delete(record)
+                session.commit()
+                LOGGER.debug("Deleted %s with %s: %s", model.__name__, col_name, id_to_search)
+            else:
+                LOGGER.debug("%s with %s %s not found", model.__name__, col_name, id_to_search)
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e)
+            raise OperationFailedException ("Deletion by column id", extra_details=["unable to delete row {:}".format(e)])
+        finally:
+            session.close()
+
+    def select_with_filter(self, model, filter_object):
+        session = self.Session()
+        try:
+            query = session.query(model)
+            # Apply filters based on the filter_object
+            if filter_object.kpi_id:
+                query = query.filter(KpiModel.kpi_id.in_([k.kpi_id.uuid for k in filter_object.kpi_id]))
+
+            if filter_object.kpi_sample_type:
+                query = query.filter(KpiModel.kpi_sample_type.in_(filter_object.kpi_sample_type))
+
+            if filter_object.device_id:
+                query = query.filter(KpiModel.device_id.in_([d.device_uuid.uuid for d in filter_object.device_id]))
+
+            if filter_object.endpoint_id:
+                query = query.filter(KpiModel.endpoint_id.in_([e.endpoint_uuid.uuid for e in filter_object.endpoint_id]))
+
+            if filter_object.service_id:
+                query = query.filter(KpiModel.service_id.in_([s.service_uuid.uuid for s in filter_object.service_id]))
+
+            if filter_object.slice_id:
+                query = query.filter(KpiModel.slice_id.in_([s.slice_uuid.uuid for s in filter_object.slice_id]))
+
+            if filter_object.connection_id:
+                query = query.filter(KpiModel.connection_id.in_([c.connection_uuid.uuid for c in filter_object.connection_id]))
+
+            if filter_object.link_id:
+                query = query.filter(KpiModel.link_id.in_([l.link_uuid.uuid for l in filter_object.link_id]))
+            result = query.all()
+            
+            if result:
+                LOGGER.debug(f"Fetched filtered rows from {model.__name__} table with filters: {filter_object}") #  - Results: {result}
+            else:
+                LOGGER.debug(f"No matching row found in {model.__name__} table with filters: {filter_object}")
+            return result
+        except Exception as e:
+            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filter_object} ::: {e}")
+            raise OperationFailedException ("Select by filter", extra_details=["unable to apply the filter {:}".format(e)])
+        finally:
+            session.close()
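+
+# Minimal usage sketch (assumes a reachable database configured through KpiEngine;
+# illustrative only, not invoked by the service code above):
+#     kpi_db = KpiDB()
+#     kpi_db.create_database()
+#     kpi_db.create_tables()
+#     row = kpi_db.search_db_row_by_id(KpiModel, 'kpi_id', 'some-uuid')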
diff --git a/src/kpi_manager/database/__init__.py b/src/kpi_manager/database/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_manager/database/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_manager/requirements.in b/src/kpi_manager/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..3e98fef362277dbf60019902e115d1c733bea9e7
--- /dev/null
+++ b/src/kpi_manager/requirements.in
@@ -0,0 +1,18 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+psycopg2-binary==2.9.*
+SQLAlchemy==1.4.*
+sqlalchemy-cockroachdb==1.4.*
+SQLAlchemy-Utils==0.38.*
diff --git a/src/kpi_manager/service/KpiManagerService.py b/src/kpi_manager/service/KpiManagerService.py
new file mode 100755
index 0000000000000000000000000000000000000000..b69a926a94c6cf10a680fe1b15d065f6bc073c97
--- /dev/null
+++ b/src/kpi_manager/service/KpiManagerService.py
@@ -0,0 +1,29 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.proto.kpi_manager_pb2_grpc import add_KpiManagerServiceServicer_to_server
+from kpi_manager.service.KpiManagerServiceServicerImpl import KpiManagerServiceServicerImpl
+
+
+class KpiManagerService(GenericGrpcService):
+    def __init__(self, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.KPIMANAGER)
+        super().__init__(port, cls_name=cls_name)
+        self.kpiManagerService_servicer = KpiManagerServiceServicerImpl()
+
+    def install_servicers(self):
+        add_KpiManagerServiceServicer_to_server(self.kpiManagerService_servicer, self.server)
diff --git a/src/kpi_manager/service/KpiManagerServiceServicerImpl.py b/src/kpi_manager/service/KpiManagerServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd22474829ea0dfb6b1a25e70bbb4d5440c0216b
--- /dev/null
+++ b/src/kpi_manager/service/KpiManagerServiceServicerImpl.py
@@ -0,0 +1,94 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging, grpc
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.proto.context_pb2 import Empty
+from common.proto.kpi_manager_pb2_grpc import KpiManagerServiceServicer
+from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList
+from kpi_manager.database.Kpi_DB import KpiDB
+from kpi_manager.database.KpiModel import Kpi as KpiModel
+
+LOGGER = logging.getLogger(__name__)
+METRICS_POOL = MetricsPool('KpiManager', 'NBIgRPC')
+
+class KpiManagerServiceServicerImpl(KpiManagerServiceServicer):
+    def __init__(self):
+        LOGGER.info('Init KpiManagerService')
+        self.kpi_db_obj = KpiDB()
+    
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetKpiDescriptor(self, request: KpiDescriptor, grpc_context: grpc.ServicerContext # type: ignore
+                        ) -> KpiId: # type: ignore
+        response = KpiId()
+        LOGGER.info("Received gRPC message object: {:}".format(request))
+        try:
+            kpi_to_insert = KpiModel.convert_KpiDescriptor_to_row(request)
+            if self.kpi_db_obj.add_row_to_db(kpi_to_insert):
+                response.kpi_id.uuid = request.kpi_id.kpi_id.uuid
+                # LOGGER.info("Added Row: {:}".format(response))
+            return response
+        except Exception as e:
+            LOGGER.error("Unable to create KpiModel class object. {:}".format(e))
+            raise e
+    
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)        
+    def GetKpiDescriptor(self, request: KpiId, grpc_context: grpc.ServicerContext # type: ignore
+                         ) -> KpiDescriptor: # type: ignore
+        response = KpiDescriptor()
+        print("--> Received gRPC message object: {:}".format(request))
+        LOGGER.info("Received gRPC message object: {:}".format(request))
+        try: 
+            kpi_id_to_search = request.kpi_id.uuid
+            row = self.kpi_db_obj.search_db_row_by_id(KpiModel, 'kpi_id', kpi_id_to_search)
+            if row is None:
+                LOGGER.info('No matching row found for kpi id: {:}'.format(kpi_id_to_search))
+                return response
+            else:
+                response = KpiModel.convert_row_to_KpiDescriptor(row)
+                return response
+        except Exception as e:
+            LOGGER.error('Unable to search kpi id. {:}'.format(e))
+            raise e
+    
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def DeleteKpiDescriptor(self, request: KpiId, grpc_context: grpc.ServicerContext # type: ignore
+                            ) -> Empty: # type: ignore
+        LOGGER.info("Received gRPC message object: {:}".format(request))
+        try:
+            kpi_id_to_search = request.kpi_id.uuid
+            self.kpi_db_obj.delete_db_row_by_id(KpiModel, 'kpi_id', kpi_id_to_search)
+        except Exception as e:
+            LOGGER.error('Unable to delete kpi id. {:}'.format(e))
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectKpiDescriptor(self, filter: KpiDescriptorFilter, grpc_context: grpc.ServicerContext # type: ignore
+                            ) -> KpiDescriptorList: # type: ignore
+        LOGGER.info("Received gRPC message object: {:}".format(filter))
+        response = KpiDescriptorList()
+        try:
+            rows = self.kpi_db_obj.select_with_filter(KpiModel, filter)
+            for row in rows:
+                kpiDescriptor_obj = KpiModel.convert_row_to_KpiDescriptor(row)
+                response.kpi_descriptor_list.append(kpiDescriptor_obj)
+            return response
+        except Exception as e:
+            LOGGER.error('Unable to apply filter on kpi descriptor. {:}'.format(e))
+            raise e
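+
+# Illustrative client-side usage (hedged sketch; KpiManagerClient is exercised in
+# kpi_manager/tests/test_kpi_manager.py):
+#     client     = KpiManagerClient()
+#     kpi_id     = client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     descriptor = client.GetKpiDescriptor(kpi_id)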
diff --git a/src/kpi_manager/service/__init__.py b/src/kpi_manager/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_manager/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_manager/service/__main__.py b/src/kpi_manager/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..244d5afa373a6462a0382a0ed26a588088a689a1
--- /dev/null
+++ b/src/kpi_manager/service/__main__.py
@@ -0,0 +1,51 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from common.Settings import get_log_level
+from .KpiManagerService import KpiManagerService
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level)
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.debug('Starting...')
+
+    grpc_service = KpiManagerService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.debug('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.debug('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/kpi_manager/tests/test_kpi_db.py b/src/kpi_manager/tests/test_kpi_db.py
new file mode 100644
index 0000000000000000000000000000000000000000..e961c12bacdbac07f111b229435ed3d89d62581f
--- /dev/null
+++ b/src/kpi_manager/tests/test_kpi_db.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging
+from kpi_manager.database.Kpi_DB import KpiDB
+
+LOGGER = logging.getLogger(__name__)
+
+def test_verify_databases_and_tables():
+    LOGGER.info('>>> test_verify_databases_and_tables : START <<< ')
+    kpiDBobj = KpiDB()
+    kpiDBobj.drop_database()
+    kpiDBobj.verify_tables()
+    kpiDBobj.create_database()
+    kpiDBobj.create_tables()
+    kpiDBobj.verify_tables()
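+
+# Run locally (illustrative; mirrors the CI invocation):
+#     pytest --log-level=INFO --verbose src/kpi_manager/tests/test_kpi_db.py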
diff --git a/src/kpi_manager/tests/test_kpi_manager.py b/src/kpi_manager/tests/test_kpi_manager.py
new file mode 100755
index 0000000000000000000000000000000000000000..f0d9526d33694a683b70180eb3bc6de833bf1cfa
--- /dev/null
+++ b/src/kpi_manager/tests/test_kpi_manager.py
@@ -0,0 +1,301 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os, pytest
+import logging
+from typing import Union
+
+#from common.proto.context_pb2 import  Empty
+from common.Constants import ServiceNameEnum
+from common.Settings import ( 
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
+from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+
+from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList
+from common.tools.service.GenericGrpcService import GenericGrpcService
+#from context.client.ContextClient import ContextClient
+
+# from device.service.driver_api.DriverFactory import DriverFactory
+# from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
+# from device.service.DeviceService import DeviceService
+# from device.client.DeviceClient import DeviceClient
+
+from kpi_manager.tests.test_messages import (
+    create_kpi_descriptor_request, create_kpi_descriptor_request_a, create_kpi_filter_request,
+    create_kpi_id_request)
+from kpi_manager.service.KpiManagerService import KpiManagerService
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+
+
+#from monitoring.service.NameMapping import NameMapping
+
+#os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
+#from device.service.drivers import DRIVERS
+
+###########################
+# Tests Setup
+###########################
+
+LOCAL_HOST = '127.0.0.1'
+
+KPIMANAGER_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIMANAGER)  # type: ignore
+os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIMANAGER_SERVICE_PORT)
+
+LOGGER = logging.getLogger(__name__)
+
+class MockContextService(GenericGrpcService):
+    # Mock service implementing Context to simplify unit tests of the KPI Manager
+
+    def __init__(self, bind_port: Union[str, int]) -> None:
+        super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')
+
+    # pylint: disable=attribute-defined-outside-init
+    def install_servicers(self):
+        self.context_servicer = MockServicerImpl_Context()
+        add_ContextServiceServicer_to_server(self.context_servicer, self.server)
+
+# @pytest.fixture(scope='session')
+# def context_service():
+#     LOGGER.info('Initializing MockContextService...')
+#     _service = MockContextService(MOCKSERVICE_PORT)
+#     _service.start()
+    
+#     LOGGER.info('Yielding MockContextService...')
+#     yield _service
+
+#     LOGGER.info('Terminating MockContextService...')
+#     _service.context_servicer.msg_broker.terminate()
+#     _service.stop()
+
+#     LOGGER.info('Terminated MockContextService...')
+
+# @pytest.fixture(scope='session')
+# def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
+#     LOGGER.info('Initializing ContextClient...')
+#     _client = ContextClient()
+    
+#     LOGGER.info('Yielding ContextClient...')
+#     yield _client
+
+#     LOGGER.info('Closing ContextClient...')
+#     _client.close()
+
+#     LOGGER.info('Closed ContextClient...')
+
+# @pytest.fixture(scope='session')
+# def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
+#     LOGGER.info('Initializing DeviceService...')
+#     driver_factory = DriverFactory(DRIVERS)
+#     driver_instance_cache = DriverInstanceCache(driver_factory)
+#     _service = DeviceService(driver_instance_cache)
+#     _service.start()
+
+#     # yield the server, when test finishes, execution will resume to stop it
+#     LOGGER.info('Yielding DeviceService...')
+#     yield _service
+
+#     LOGGER.info('Terminating DeviceService...')
+#     _service.stop()
+
+#     LOGGER.info('Terminated DeviceService...')
+
+# @pytest.fixture(scope='session')
+# def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument
+#     LOGGER.info('Initializing DeviceClient...')
+#     _client = DeviceClient()
+
+#     LOGGER.info('Yielding DeviceClient...')
+#     yield _client
+
+#     LOGGER.info('Closing DeviceClient...')
+#     _client.close()
+
+#     LOGGER.info('Closed DeviceClient...')
+
+# This fixture will be requested by test cases and last during testing session
+@pytest.fixture(scope='session')
+def kpi_manager_service():
+    LOGGER.info('Initializing KpiManagerService...')
+    #name_mapping = NameMapping()
+    # _service = MonitoringService(name_mapping)
+    # _service = KpiManagerService(name_mapping)
+    _service = KpiManagerService()
+    _service.start()
+
+    # yield the server, when test finishes, execution will resume to stop it
+    LOGGER.info('Yielding KpiManagerService...')
+    yield _service
+
+    LOGGER.info('Terminating KpiManagerService...')
+    _service.stop()
+
+    LOGGER.info('Terminated KpiManagerService...')
+
+# This fixture will be requested by test cases and last during testing session.
+# The client requires the server, so the client fixture depends on the server fixture
+# (compare: def monitoring_client(monitoring_service : MonitoringService) in Monitoring).
+@pytest.fixture(scope='session')
+def kpi_manager_client(kpi_manager_service : KpiManagerService): # pylint: disable=redefined-outer-name,unused-argument
+    LOGGER.info('Initializing KpiManagerClient...')
+    _client = KpiManagerClient()
+
+    # yield the client; when the test session finishes, execution resumes to close it
+    LOGGER.info('Yielding KpiManagerClient...')
+    yield _client
+
+    LOGGER.info('Closing KpiManagerClient...')
+    _client.close()
+
+    LOGGER.info('Closed KpiManagerClient...')
+
+##################################################
+# Prepare Environment, should be the first test
+##################################################
+
+# # ERROR on this test --- 
+# def test_prepare_environment(
+#     context_client : ContextClient,                 # pylint: disable=redefined-outer-name,unused-argument
+# ):
+#     context_id = json_context_id(DEFAULT_CONTEXT_NAME)
+#     context_client.SetContext(Context(**json_context(DEFAULT_CONTEXT_NAME)))
+#     context_client.SetTopology(Topology(**json_topology(DEFAULT_TOPOLOGY_NAME, context_id=context_id)))
+
+###########################
+# Tests Implementation of Kpi Manager
+###########################
+
+# ---------- 3rd Iteration Tests ----------------
+# def test_SetKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ")
+#     response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     LOGGER.info("Response gRPC message object: {:}".format(response))
+#     assert isinstance(response, KpiId)
+
+# def test_DeleteKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ")
+#     # adding KPI
+#     response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     # deleting KPI
+#     del_response = kpi_manager_client.DeleteKpiDescriptor(response_id)
+#     # select KPI
+#     kpi_manager_client.GetKpiDescriptor(response_id)
+#     LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response))
+#     assert isinstance(del_response, Empty)
+
+def test_GetKpiDescriptor(kpi_manager_client):
+    LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ")
+    # adding KPI
+    response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+    # get KPI
+    response = kpi_manager_client.GetKpiDescriptor(response_id)
+    LOGGER.info("Response gRPC message object: {:}".format(response))
+
+    LOGGER.info(" >>> calling GetKpiDescriptor with random ID")
+    rand_response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request())
+    LOGGER.info("Response gRPC message object: {:}".format(rand_response))
+
+    assert isinstance(response, KpiDescriptor)
+
+# def test_SelectKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ")
+#     # adding KPI
+#     kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     # select KPI(s)    
+#     response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request())
+#     LOGGER.info("Response gRPC message object: {:}".format(response))
+#     assert isinstance(response, KpiDescriptorList)
+
+# def test_set_list_of_KPIs(kpi_manager_client):
+#     LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ")
+#     KPIs_TO_SEARCH = ["node_in_power_total", "node_in_current_total", "node_out_power_total"]
+#     # adding KPI
+#     for kpi in KPIs_TO_SEARCH:
+#        kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(kpi))
+    
+
+# ---------- 2nd Iteration Tests -----------------
+# def test_SetKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ")
+#     with open("kpi_manager/tests/KPI_configs.json", 'r') as file:
+#         data = json.load(file)
+#         _descriptors = data.get('KPIs', [])
+#     for _descritor_name in _descriptors:
+#         response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a(_descritor_name))
+#         LOGGER.info("Response gRPC message object: {:}".format(response))
+#     assert isinstance(response, KpiId)
+
+# def test_GetKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ")
+#     response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request())
+#     LOGGER.info("Response gRPC message object: {:}".format(response))
+#     assert isinstance(response, KpiDescriptor)
+
+# def test_DeleteKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_DeleteKpiDescriptor: START <<< ")
+#     response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     del_response = kpi_manager_client.DeleteKpiDescriptor(response)
+#     kpi_manager_client.GetKpiDescriptor(response)
+#     LOGGER.info("Response of delete method gRPC message object: {:}".format(del_response))
+#     assert isinstance(del_response, Empty)
+
+# def test_SelectKpiDescriptor(kpi_manager_client):
+#     LOGGER.info(" >>> test_SelectKpiDescriptor: START <<< ")
+#     kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request_a())
+#     response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request_a())
+#     LOGGER.info("Response gRPC message object: {:}".format(response))
+#     assert isinstance(response, KpiDescriptorList)
+
+# ------------- INITIAL TESTs ----------------
+# Test case that makes use of client fixture to test server's CreateKpi method
+# def test_set_kpi(kpi_manager_client): # pylint: disable=redefined-outer-name
+#     # make call to server
+#     LOGGER.warning('test_create_kpi requesting')
+#     for i in range(3):
+#         response = kpi_manager_client.SetKpiDescriptor(create_kpi_request(str(i+1)))
+#         LOGGER.debug(str(response))
+#         assert isinstance(response, KpiId)
+
+# # Test case that makes use of client fixture to test server's DeleteKpi method
+# def test_delete_kpi(kpi_manager_client): # pylint: disable=redefined-outer-name
+#     # make call to server
+#     LOGGER.warning('delete_kpi requesting')
+#     response = kpi_manager_client.SetKpiDescriptor(create_kpi_request('4'))
+#     response = kpi_manager_client.DeleteKpiDescriptor(response)
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, Empty)
+
+# # Test case that makes use of client fixture to test server's GetKpiDescriptor method
+# def test_select_kpi_descriptor(kpi_manager_client): # pylint: disable=redefined-outer-name
+#     LOGGER.warning('test_selectkpidescritor begin')
+#     response = kpi_manager_client.SelectKpiDescriptor(create_kpi_filter_request())
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, KpiDescriptorList)
diff --git a/src/kpi_manager/tests/test_messages.py b/src/kpi_manager/tests/test_messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b5c45859b6c10056211f9f33df950d9668c11ea
--- /dev/null
+++ b/src/kpi_manager/tests/test_messages.py
@@ -0,0 +1,78 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+from common.proto import kpi_manager_pb2
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.proto.context_pb2 import DeviceId, LinkId, ServiceId, SliceId,\
+                             ConnectionId, EndPointId
+
+
+def create_kpi_id_request():
+    _create_kpi_id = kpi_manager_pb2.KpiId()
+    _create_kpi_id.kpi_id.uuid = str(uuid.uuid4())
+    return _create_kpi_id
+
+def create_kpi_descriptor_request(descriptor_name: str = "Test_name"):
+    _create_kpi_request                                    = kpi_manager_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_id.kpi_id.uuid                 = str(uuid.uuid4())
+    _create_kpi_request.kpi_description                    = descriptor_name
+    _create_kpi_request.kpi_sample_type                    = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    _create_kpi_request.device_id.device_uuid.uuid         = 'DEV2' 
+    _create_kpi_request.service_id.service_uuid.uuid       = 'SERV2'
+    _create_kpi_request.slice_id.slice_uuid.uuid           = 'SLC1' 
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid     = 'END1' 
+    _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' 
+    _create_kpi_request.link_id.link_uuid.uuid             = 'LNK1' 
+    return _create_kpi_request
+
+def create_kpi_descriptor_request_a(description: str = "Test Description"):
+    _create_kpi_request                                    = kpi_manager_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_id.kpi_id.uuid                 = str(uuid.uuid4())
+    _create_kpi_request.kpi_description                    = description
+    _create_kpi_request.kpi_sample_type                    = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    _create_kpi_request.device_id.device_uuid.uuid         = 'DEV4' 
+    _create_kpi_request.service_id.service_uuid.uuid       = 'SERV3'
+    _create_kpi_request.slice_id.slice_uuid.uuid           = 'SLC3' 
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid     = 'END2' 
+    _create_kpi_request.connection_id.connection_uuid.uuid = 'CON2' 
+    _create_kpi_request.link_id.link_uuid.uuid             = 'LNK2' 
+    return _create_kpi_request
+
+def create_kpi_filter_request():
+    _create_kpi_filter_request = kpi_manager_pb2.KpiDescriptorFilter()
+    _create_kpi_filter_request.kpi_sample_type.append(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED)
+
+    device_id_obj     = DeviceId()
+    service_id_obj    = ServiceId()
+    slice_id_obj      = SliceId()
+    endpoint_id_obj   = EndPointId()
+    connection_id_obj = ConnectionId()
+    link_id_obj       = LinkId()
+
+    device_id_obj.device_uuid.uuid         = "DEV2"
+    service_id_obj.service_uuid.uuid       = "SERV2"
+    slice_id_obj.slice_uuid.uuid           = "SLC1"
+    endpoint_id_obj.endpoint_uuid.uuid     = "END1"
+    connection_id_obj.connection_uuid.uuid = "CON1"
+    link_id_obj.link_uuid.uuid             = "LNK1"
+
+    _create_kpi_filter_request.device_id.append(device_id_obj)
+    _create_kpi_filter_request.service_id.append(service_id_obj)
+    _create_kpi_filter_request.slice_id.append(slice_id_obj)
+    _create_kpi_filter_request.endpoint_id.append(endpoint_id_obj)
+    _create_kpi_filter_request.connection_id.append(connection_id_obj)
+    _create_kpi_filter_request.link_id.append(link_id_obj)
+
+    return _create_kpi_filter_request
\ No newline at end of file
diff --git a/src/kpi_value_api/.gitlab-ci.yml b/src/kpi_value_api/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..166e9d3cbcf3eb09c914384a9906853dddd7bfb5
--- /dev/null
+++ b/src/kpi_value_api/.gitlab-ci.yml
@@ -0,0 +1,109 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build kpi-value-api:
+  variables:
+    IMAGE_NAME: 'kpi_value_api' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
+    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit_test kpi-value-api:
+  variables:
+    IMAGE_NAME: 'kpi_value_api' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build kpi-value-api
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+    - docker container prune -f
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker run --name $IMAGE_NAME -d -p 30020:30020 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - sleep 5
+    - docker ps -a
+    - docker logs $IMAGE_NAME
+    - >
+      docker exec -i $IMAGE_NAME bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py"
+    - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker rm -f $IMAGE_NAME
+    - docker network rm teraflowbridge
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - src/$IMAGE_NAME/tests/Dockerfile
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
+
+## Deployment of the service in Kubernetes Cluster
+#deploy context:
+#  variables:
+#    IMAGE_NAME: 'context' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit test context
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual    
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/kpi_value_api/Dockerfile b/src/kpi_value_api/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..7dd8d307b8338c4a29e97c742ca12a49c4611e0a
--- /dev/null
+++ b/src/kpi_value_api/Dockerfile
@@ -0,0 +1,68 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/kpi_value_api
+WORKDIR /var/teraflow/kpi_value_api
+COPY src/kpi_value_api/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/kpi_value_api/. kpi_value_api/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "kpi_value_api.service"]
diff --git a/src/kpi_value_api/__init__.py b/src/kpi_value_api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_api/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_api/client/KpiValueApiClient.py b/src/kpi_value_api/client/KpiValueApiClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..f432271cfb7c8136f72156330b25d0b82b934d99
--- /dev/null
+++ b/src/kpi_value_api/client/KpiValueApiClient.py
@@ -0,0 +1,63 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+from common.proto.context_pb2 import Empty
+from common.proto.kpi_value_api_pb2 import KpiValueList, KpiValueFilter
+from common.proto.kpi_value_api_pb2_grpc import KpiValueAPIServiceStub
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 10
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class KpiValueApiClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.KPIVALUEAPI) 
+        if not port: port = get_service_port_grpc(ServiceNameEnum.KPIVALUEAPI)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub    = KpiValueAPIServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+    
+    @RETRY_DECORATOR
+    def StoreKpiValues(self, request: KpiValueList) -> Empty:
+        LOGGER.debug('StoreKpiValues: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.StoreKpiValues(request)
+        LOGGER.debug('StoreKpiValues result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+        
+    @RETRY_DECORATOR
+    def SelectKpiValues(self, request: KpiValueFilter) -> KpiValueList:
+        LOGGER.debug('SelectKpiValues: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectKpiValues(request)
+        LOGGER.debug('SelectKpiValues result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
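+
+# Usage sketch (illustrative; assumes the KPI Value API service is reachable):
+#     client = KpiValueApiClient()
+#     client.StoreKpiValues(kpi_value_list)    # kpi_value_list: KpiValueList
+#     client.close()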
diff --git a/src/kpi_value_api/client/__init__.py b/src/kpi_value_api/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_api/client/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_api/requirements.in b/src/kpi_value_api/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..7e4694109dc4e1d31b86abfc03162494faafcdaf
--- /dev/null
+++ b/src/kpi_value_api/requirements.in
@@ -0,0 +1,16 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+confluent-kafka==2.3.*
+requests==2.27.*
diff --git a/src/kpi_value_api/service/KpiValueApiService.py b/src/kpi_value_api/service/KpiValueApiService.py
new file mode 100644
index 0000000000000000000000000000000000000000..68b6fbdc278a00aa7cf98385bcf8afa573f91445
--- /dev/null
+++ b/src/kpi_value_api/service/KpiValueApiService.py
@@ -0,0 +1,30 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from .KpiValueApiServiceServicerImpl import KpiValueApiServiceServicerImpl
+from common.proto.kpi_value_api_pb2_grpc import add_KpiValueAPIServiceServicer_to_server
+
+
+class KpiValueApiService(GenericGrpcService):
+    def __init__(self, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.KPIVALUEAPI)
+        super().__init__(port, cls_name=cls_name)
+        self.kpiValueApiService_servicer = KpiValueApiServiceServicerImpl()
+    
+    def install_servicers(self):
+        add_KpiValueAPIServiceServicer_to_server(self.kpiValueApiService_servicer, self.server)
diff --git a/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..d27de54f3cddfd0d70d656a89c45adc50e518289
--- /dev/null
+++ b/src/kpi_value_api/service/KpiValueApiServiceServicerImpl.py
@@ -0,0 +1,118 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, grpc, requests
+from typing import Tuple, Any
+from datetime import datetime
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+
+from common.proto.context_pb2 import Empty
+from common.proto.kpi_value_api_pb2_grpc import KpiValueAPIServiceServicer
+from common.proto.kpi_value_api_pb2 import KpiValueList, KpiValueFilter, KpiValue, KpiValueType
+
+from confluent_kafka import Producer as KafkaProducer
+
+
+LOGGER       = logging.getLogger(__name__)
+METRICS_POOL = MetricsPool('KpiValueAPI', 'NBIgRPC')
+PROM_URL     = "http://localhost:9090"
+
+class KpiValueApiServiceServicerImpl(KpiValueAPIServiceServicer):
+    def __init__(self):
+        LOGGER.debug('Init KpiValueApiService')
+    
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StoreKpiValues(self, request: KpiValueList, grpc_context: grpc.ServicerContext
+                       ) -> Empty:
+        LOGGER.debug('StoreKpiValues: Received gRPC message object: {:}'.format(request))
+        producer_obj = KafkaProducer({
+            'bootstrap.servers' : KafkaConfig.SERVER_IP.value    
+        })
+        for kpi_value in request.kpi_value_list:
+            kpi_value_to_produce : Tuple [str, Any, Any] = (
+                kpi_value.kpi_id.kpi_id,            
+                kpi_value.timestamp,                
+                kpi_value.kpi_value_type            # one of the typed values defined in KpiValueType
+            )
+            LOGGER.debug('KPI to produce is {:}'.format(kpi_value_to_produce))
+            msg_key = "gRPC-kpivalueapi"        # str(__class__.__name__) can be used
+        
+            producer_obj.produce(
+                KafkaTopic.VALUE.value, 
+                key      = msg_key,
+                value    = kpi_value.SerializeToString(),      # value = json.dumps(kpi_value_to_produce),
+                callback = self.delivery_callback
+            )
+            producer_obj.flush()
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectKpiValues(self, request: KpiValueFilter, grpc_context: grpc.ServicerContext
+                        ) -> KpiValueList:
+        LOGGER.debug('SelectKpiValues: Received gRPC message object: {:}'.format(request))
+        response = KpiValueList()
+        metrics          = [kpi.kpi_id.uuid for kpi in request.kpi_id]     # KPI UUIDs used as the Prometheus query strings
+        start_timestamps = [timestamp for timestamp in request.start_timestamp]
+        end_timestamps   = [timestamp for timestamp in request.end_timestamp]
+        results = []
+
+        for start, end in zip(start_timestamps, end_timestamps):
+            start_str = datetime.fromtimestamp(start.seconds).isoformat() + "Z"
+            end_str = datetime.fromtimestamp(end.seconds).isoformat() + "Z"
+
+            for metric in metrics:
+                url    = f'{PROM_URL}/api/v1/query_range'
+                params = {
+                    'query': metric,
+                    'start': start_str,
+                    'end'  : end_str,
+                    'step' : '30s'           # or any other step you need
+                }
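+                # For reference, a successful query_range reply is JSON shaped as
+                #   {"status": "success",
+                #    "data": {"resultType": "matrix",
+                #             "result": [ {"metric": {...},
+                #                          "values": [[<unix_ts>, "<value>"], ...]} ]}}
+                # which is the structure the parsing below relies on.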
+                prom_response = requests.get(url, params=params)
+                if prom_response.status_code == 200:
+                    data = prom_response.json()
+                    for result in data['data']['result']:
+                        for value in result['values']:
+                            kpi_value = KpiValue()
+                            kpi_value.kpi_id.kpi_id.uuid  = metric
+                            kpi_value.timestamp.timestamp = float(value[0])
+                            kpi_value.kpi_value_type.CopyFrom(self._convert_value_to_kpi_value_type(value[1]))
+                            results.append(kpi_value)
+
+        response.kpi_value_list.extend(results)
+        return response
+    def _convert_value_to_kpi_value_type(self, value):
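+        # Illustrative mapping (based on the checks below): "42" -> int64Val=42,
+        # "3.14" -> floatVal=3.14, "true"/"false" -> boolVal, anything else -> stringVal.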
+        # Check if the value is an integer (int64)
+        try:
+            int64_value = int(value)
+            return KpiValueType(int64Val=int64_value)
+        except ValueError:
+            pass
+        # Check if the value is a float
+        try:
+            float_value = float(value)
+            return KpiValueType(floatVal=float_value)
+        except ValueError:
+            pass
+        # Check if the value is a boolean
+        if value.lower() in ['true', 'false']:
+            bool_value = value.lower() == 'true'
+            return KpiValueType(boolVal=bool_value)
+        # If none of the above, treat it as a string
+        return KpiValueType(stringVal=value)
+
+
+    def delivery_callback(self, err, msg):
+        if err: LOGGER.debug('Message delivery failed: {:}'.format(err))
+        else:   LOGGER.debug('Message delivered to topic {:}'.format(msg.topic()))
diff --git a/src/kpi_value_api/service/__init__.py b/src/kpi_value_api/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_api/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_api/service/__main__.py b/src/kpi_value_api/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0f265a48812c0ae475e4e079a09b83cdfb7c69e
--- /dev/null
+++ b/src/kpi_value_api/service/__main__.py
@@ -0,0 +1,51 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from common.Settings import get_log_level
+from .KpiValueApiService import KpiValueApiService
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level)
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.debug('Starting...')
+
+    grpc_service = KpiValueApiService()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.debug('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.debug('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/kpi_value_api/tests/messages.py b/src/kpi_value_api/tests/messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2a1cbb0b275fb26d6498e4470f3869a105a8d36
--- /dev/null
+++ b/src/kpi_value_api/tests/messages.py
@@ -0,0 +1,35 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid, time
+from common.proto.kpi_value_api_pb2 import KpiValue, KpiValueList
+
+
+def create_kpi_value_list():
+    _create_kpi_value_list = KpiValueList()
+    # To run this experiment successfully, an already existing UUID in the KPI DB is necessary,
+    # because the UUID is used to get the descriptor from the KPI DB.
+    EXISTING_KPI_IDs = ["725ce3ad-ac67-4373-bd35-8cd9d6a86e09",
+                        str(uuid.uuid4()), 
+                        str(uuid.uuid4())]
+
+    for kpi_id_uuid in EXISTING_KPI_IDs:
+        kpi_value_object = KpiValue()
+        kpi_value_object.kpi_id.kpi_id.uuid      = kpi_id_uuid
+        kpi_value_object.timestamp.timestamp     = float(time.time())
+        kpi_value_object.kpi_value_type.floatVal = 100
+
+        _create_kpi_value_list.kpi_value_list.append(kpi_value_object)
+
+    return _create_kpi_value_list
diff --git a/src/kpi_value_api/tests/test_kpi_value_api.py b/src/kpi_value_api/tests/test_kpi_value_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..307b5cdad4e6503a774e308f669fc44762f84bf1
--- /dev/null
+++ b/src/kpi_value_api/tests/test_kpi_value_api.py
@@ -0,0 +1,84 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os, logging, pytest
+from common.proto.context_pb2 import Empty
+from common.Constants import ServiceNameEnum
+from common.tools.kafka.Variables import KafkaTopic
+from common.Settings import ( 
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc)
+from kpi_value_api.service.KpiValueApiService import KpiValueApiService
+from kpi_value_api.client.KpiValueApiClient import KpiValueApiClient
+from kpi_value_api.tests.messages import create_kpi_value_list
+
+
+LOCAL_HOST = '127.0.0.1'
+KPIVALUEAPI_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIVALUEAPI)  # type: ignore
+os.environ[get_env_var_name(ServiceNameEnum.KPIVALUEAPI, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.KPIVALUEAPI, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIVALUEAPI_SERVICE_PORT)
+LOGGER = logging.getLogger(__name__)
+
+# This fixture will be requested by test cases and last during testing session
+@pytest.fixture(scope='session')
+def kpi_value_api_service():
+    LOGGER.info('Initializing KpiValueApiService...')
+    # _service = MonitoringService(name_mapping)
+    _service = KpiValueApiService()
+    _service.start()
+
+    # yield the server, when test finishes, execution will resume to stop it
+    LOGGER.info('Yielding KpiValueApiService...')
+    yield _service
+
+    LOGGER.info('Terminating KpiValueApiService...')
+    _service.stop()
+
+    LOGGER.info('Terminated KpiValueApiService...')
+
+# This fixture will be requested by test cases and last during testing session.
+# The client requires the server, so client fixture has the server as dependency.
+@pytest.fixture(scope='session')
+def kpi_value_api_client(kpi_value_api_service : KpiValueApiService ):
+    LOGGER.info('Initializing KpiValueApiClient...')
+    _client = KpiValueApiClient()
+
+    # yield the client; when the test finishes, execution will resume to close it
+    LOGGER.info('Yielding KpiValueApiClient...')
+    yield _client
+
+    LOGGER.info('Closing KpiValueApiClient...')
+    _client.close()
+
+    LOGGER.info('Closed KpiValueApiClient...')
+
+##################################################
+# Prepare Environment, should be the first test
+##################################################
+
+# To be added here
+
+###########################
+# Tests Implementation of Kpi Value Api
+###########################
+
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+def test_store_kpi_values(kpi_value_api_client):
+    LOGGER.debug(" >>> test_set_list_of_KPIs: START <<< ")
+    response = kpi_value_api_client.StoreKpiValues(create_kpi_value_list())
+    assert isinstance(response, Empty)
diff --git a/src/kpi_value_writer/.gitlab-ci.yml b/src/kpi_value_writer/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..25619ce7f8b4346172587dbf2e804896aff20e4d
--- /dev/null
+++ b/src/kpi_value_writer/.gitlab-ci.yml
@@ -0,0 +1,109 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build kpi-value-writer:
+  variables:
+    IMAGE_NAME: 'kpi_value_writer' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
+    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Apply unit test to the component
+unit_test kpi-value-writer:
+  variables:
+    IMAGE_NAME: 'kpi_value_writer' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build kpi-value-writer
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi
+    - if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+    - docker container prune -f
+  script:
+    - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker run --name $IMAGE_NAME -d -p 30030:30030 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+    - sleep 5
+    - docker ps -a
+    - docker logs $IMAGE_NAME
+    - >
+      docker exec -i $IMAGE_NAME bash -c
+      "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}_report.xml $IMAGE_NAME/tests/test_*.py"
+    - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    - docker rm -f $IMAGE_NAME
+    - docker network rm teraflowbridge
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - src/$IMAGE_NAME/tests/Dockerfile
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+  artifacts:
+      when: always
+      reports:
+        junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
+
+## Deployment of the service in Kubernetes Cluster
+#deploy context:
+#  variables:
+#    IMAGE_NAME: 'context' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit test context
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual    
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/kpi_value_writer/Dockerfile b/src/kpi_value_writer/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..70f41128bd8c982f604a3424d2096c918ead080e
--- /dev/null
+++ b/src/kpi_value_writer/Dockerfile
@@ -0,0 +1,70 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/kpi_value_writer
+WORKDIR /var/teraflow/kpi_value_writer
+COPY src/kpi_value_writer/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/kpi_value_writer/. kpi_value_writer/
+COPY src/kpi_manager/__init__.py kpi_manager/__init__.py
+COPY src/kpi_manager/client/. kpi_manager/client/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "kpi_value_writer.service"]
diff --git a/src/kpi_value_writer/README.md b/src/kpi_value_writer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..72ba6e5594adeef4a29d650615716c26273ed115
--- /dev/null
+++ b/src/kpi_value_writer/README.md
@@ -0,0 +1,29 @@
+# How to locally run and test KPI manager micro-service
+
+## --- File links need to be updated. ---
+### Pre-requisites
+The following requirements should be fulfilled before the execution of the KPI management service.
+
+1. Verify that the [kpi_management.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/kpi_management.proto) file exists and that the gRPC files are generated successfully.
+2. Verify that a virtual environment exists and that all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/requirements.in) are installed successfully.
+3. Verify the creation of the required database and table.
+The [KPI DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/database/tests/KpiDBtests.py) python file lists the functions to create the database and tables, and
+the [KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/service/database/KpiEngine.py) contains the DB string; update the string as per your deployment.
+
+### Messages format templates
+The ["Messages"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_messages.py) python file lists the basic gRPC message formats used during testing.
+
+### Test file
+The ["KPI management test"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_management/kpi_manager/tests/test_kpi_manager.py) python file lists the different tests conducted during the experiment.
+
+### Flow of execution (Kpi Manager Service functions)
+1. Call the `create_database()` and `create_tables()` functions from the `Kpi_DB` class to create the required database and table if they don't exist. Call `verify_tables` to verify the existence of the KPI table.
+
+2. Call the gRPC method `SetKpiDescriptor(KpiDescriptor)->KpiId` to add the KpiDescriptor to the `Kpi` DB. `KpiDescriptor` and `KpiId` are both pre-defined gRPC message types.
+
+3. Call `GetKpiDescriptor(KpiId)->KpiDescriptor` to read the `KpiDescriptor` from the DB and `DeleteKpiDescriptor(KpiId)` to delete the `KpiDescriptor` from the DB.
+
+4. Call `SelectKpiDescriptor(KpiDescriptorFilter)->KpiDescriptorList` to get all `KpiDescriptor` objects that match the filter criteria. `KpiDescriptorFilter` and `KpiDescriptorList` are pre-defined gRPC message types. A usage sketch of these calls is shown below.
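+
+A minimal usage sketch of steps 2 and 3 (an illustrative example only: it assumes a running Kpi Manager service, the `KpiManagerClient` shipped in this repository, and the `create_kpi_descriptor_request()` helper from the test messages file linked above):
+
+```python
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+from kpi_manager.tests.test_messages import create_kpi_descriptor_request
+
+client = KpiManagerClient()
+kpi_id = client.SetKpiDescriptor(create_kpi_descriptor_request())   # step 2: register the descriptor
+descriptor = client.GetKpiDescriptor(kpi_id)                        # step 3: read it back
+client.DeleteKpiDescriptor(kpi_id)                                  # step 3: delete it
+```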
+
+## For KPI composer and KPI writer
+The functionalities of the KPI composer and writer are heavily dependent upon the Telemetry service. Therefore, these services have other pre-requisites that are mentioned [here](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/requirements.in).
\ No newline at end of file
diff --git a/src/kpi_value_writer/__init__.py b/src/kpi_value_writer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_writer/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_writer/requirements.in b/src/kpi_value_writer/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..7e4694109dc4e1d31b86abfc03162494faafcdaf
--- /dev/null
+++ b/src/kpi_value_writer/requirements.in
@@ -0,0 +1,16 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+confluent-kafka==2.3.*
+requests==2.27.*
diff --git a/src/kpi_value_writer/service/KpiValueWriter.py b/src/kpi_value_writer/service/KpiValueWriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..26bab44657606b1f3edc14659d128c5ccc7a6890
--- /dev/null
+++ b/src/kpi_value_writer/service/KpiValueWriter.py
@@ -0,0 +1,103 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import threading
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+from common.proto.kpi_value_api_pb2 import KpiValue
+from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId
+from common.Settings import get_service_port_grpc
+from common.Constants import ServiceNameEnum
+from common.tools.service.GenericGrpcService import GenericGrpcService
+
+
+from confluent_kafka import KafkaError
+from confluent_kafka import Consumer as KafkaConsumer
+
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+# -- test import --
+# from kpi_value_writer.tests.test_messages import create_kpi_descriptor_request
+from .MetricWriterToPrometheus import MetricWriterToPrometheus
+
+
+LOGGER           = logging.getLogger(__name__)
+ACTIVE_CONSUMERS = []
+METRIC_WRITER    = MetricWriterToPrometheus()
+
+class KpiValueWriter(GenericGrpcService):
+    def __init__(self, cls_name : str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.KPIVALUEWRITER)
+        super().__init__(port, cls_name=cls_name)
+
+    @staticmethod
+    def RunKafkaConsumer():
+        thread = threading.Thread(target=KpiValueWriter.KafkaConsumer, args=())
+        ACTIVE_CONSUMERS.append(thread)
+        thread.start()
+
+    @staticmethod
+    def KafkaConsumer():
+        kafka_consumer  = KafkaConsumer(
+            { 'bootstrap.servers' : KafkaConfig.SERVER_IP.value,
+              'group.id'          : __class__.__name__,
+              'auto.offset.reset' : 'latest'}
+        )
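+        # note: with 'auto.offset.reset' set to 'latest', a consumer group with no
+        # committed offsets only receives messages produced after it subscribes;
+        # 'earliest' would replay the topic from the beginning instead.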
+        kpi_manager_client = KpiManagerClient()
+        kafka_consumer.subscribe([KafkaTopic.VALUE.value])
+        LOGGER.debug("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value))
+        print("Kafka Consumer start listenng on topic: {:}".format(KafkaTopic.VALUE.value))
+        while True:
+            raw_kpi = kafka_consumer.poll(1.0)
+            if raw_kpi is None:
+                continue
+            elif raw_kpi.error():
+                if raw_kpi.error().code() == KafkaError._PARTITION_EOF:
+                    continue
+                else:
+                    print("Consumer error: {}".format(raw_kpi.error()))
+                    continue
+            try:
+                kpi_value = KpiValue()
+                kpi_value.ParseFromString(raw_kpi.value())
+                LOGGER.info("Received KPI : {:}".format(kpi_value))
+                print("Received KPI : {:}".format(kpi_value))
+                KpiValueWriter.get_kpi_descriptor(kpi_value, kpi_manager_client)
+            except Exception as e:
+                print("Error detail: {:}".format(e))
+                continue
+
+    @staticmethod
+    def get_kpi_descriptor(kpi_value : KpiValue, kpi_manager_client : KpiManagerClient):
+        print("--- START -----")
+
+        kpi_id = KpiId()
+        kpi_id.kpi_id.uuid = kpi_value.kpi_id.kpi_id.uuid
+        print("KpiId generated: {:}".format(kpi_id))
+        # print("Kpi manger client created: {:}".format(kpi_manager_client))
+
+        try:
+            kpi_descriptor_object : KpiDescriptor = kpi_manager_client.GetKpiDescriptor(kpi_id)
+            if kpi_descriptor_object.kpi_id.kpi_id.uuid == kpi_id.kpi_id.uuid:
+                LOGGER.info("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
+                print("Extracted KpiDescriptor: {:}".format(kpi_descriptor_object))
+                METRIC_WRITER.create_and_expose_cooked_kpi(kpi_descriptor_object, kpi_value)
+            else:
+                LOGGER.info("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id))
+                print("No KPI Descriptor found in DB for Kpi ID: {:}".format(kpi_id))
+        except Exception as e:
+            LOGGER.info("Unable to get KpiDescriptor. Error: {:}".format(e))
+            print ("Unable to get KpiDescriptor. Error: {:}".format(e))
diff --git a/src/kpi_value_writer/service/MetricWriterToPrometheus.py b/src/kpi_value_writer/service/MetricWriterToPrometheus.py
new file mode 100644
index 0000000000000000000000000000000000000000..b681164786bd310d457998bae55b836522888b94
--- /dev/null
+++ b/src/kpi_value_writer/service/MetricWriterToPrometheus.py
@@ -0,0 +1,96 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# expose cooked KPI values (KpiDescriptor + KpiValue) to be scraped by Prometheus
+
+import logging
+from prometheus_client import start_http_server, Gauge, CollectorRegistry
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+
+from common.proto.kpi_value_api_pb2 import KpiValue
+from common.proto.kpi_manager_pb2 import KpiDescriptor
+
+LOGGER        = logging.getLogger(__name__)
+PROM_METRICS  = {}
+PROM_REGISTRY = CollectorRegistry()
+
+class MetricWriterToPrometheus:
+    '''
+    This class exposes the *cooked KPI* on the endpoint to be scraped by the Prometheus server.
+    cooked KPI value = KpiDescriptor (gRPC message) + KpiValue (gRPC message)
+    '''
+    def __init__(self):
+        # start the HTTP endpoint that the Prometheus server will scrape
+        self.start_prometheus_client()
+
+    def start_prometheus_client(self):
+        start_http_server(10808, registry=PROM_REGISTRY)
+        LOGGER.debug("Prometheus client is started on port 10808")
+
+    def merge_kpi_descriptor_and_kpi_value(self, kpi_descriptor, kpi_value):
+        # Create a dictionary from the kpi_descriptor's attributes plus the value and timestamp
+        cooked_kpi = {
+            'kpi_id'         : kpi_descriptor.kpi_id.kpi_id.uuid,
+            'kpi_description': kpi_descriptor.kpi_description,
+            'kpi_sample_type': KpiSampleType.Name(kpi_descriptor.kpi_sample_type),
+            'device_id'      : kpi_descriptor.device_id.device_uuid.uuid,
+            'endpoint_id'    : kpi_descriptor.endpoint_id.endpoint_uuid.uuid,
+            'service_id'     : kpi_descriptor.service_id.service_uuid.uuid,
+            'slice_id'       : kpi_descriptor.slice_id.slice_uuid.uuid,
+            'connection_id'  : kpi_descriptor.connection_id.connection_uuid.uuid,
+            'link_id'        : kpi_descriptor.link_id.link_uuid.uuid,
+            'time_stamp'     : kpi_value.timestamp.timestamp,
+            'kpi_value'      : kpi_value.kpi_value_type.floatVal
+        }
+        # LOGGER.debug("Cooked Kpi: {:}".format(cooked_kpi))
+        return cooked_kpi
+
+    def create_and_expose_cooked_kpi(self, kpi_descriptor: KpiDescriptor, kpi_value: KpiValue):
+        # merge both gRPC messages into a single variable
+        cooked_kpi = self.merge_kpi_descriptor_and_kpi_value(kpi_descriptor, kpi_value)
+        tags_to_exclude = {'kpi_description', 'kpi_sample_type', 'kpi_value'}   # these become the metric description, name and value, not tags
+        metric_tags = [tag for tag in cooked_kpi.keys() if tag not in tags_to_exclude]
+        metric_name = cooked_kpi['kpi_sample_type']
+        try:
+            if metric_name not in PROM_METRICS:     # only register the metric when it doesn't exist yet
+                PROM_METRICS[metric_name] = Gauge(
+                    metric_name,
+                    cooked_kpi['kpi_description'],
+                    metric_tags,
+                    registry=PROM_REGISTRY
+                )
+            LOGGER.debug("Metric is created with labels: {:}".format(metric_tags))
+            PROM_METRICS[metric_name].labels(
+                    kpi_id          = cooked_kpi['kpi_id'],
+                    device_id       = cooked_kpi['device_id'],
+                    endpoint_id     = cooked_kpi['endpoint_id'],
+                    service_id      = cooked_kpi['service_id'],
+                    slice_id        = cooked_kpi['slice_id'],
+                    connection_id   = cooked_kpi['connection_id'],
+                    link_id         = cooked_kpi['link_id'],
+                    time_stamp      = cooked_kpi['time_stamp'],
+                ).set(float(cooked_kpi['kpi_value']))
+            LOGGER.debug("Metric pushed to the endpoints: {:}".format(PROM_METRICS[metric_name]))
+
+        except ValueError as e:
+            if 'Duplicated timeseries' in str(e):
+                LOGGER.debug("Metric {:} is already registered. Skipping.".format(metric_name))
+                print("Metric {:} is already registered. Skipping.".format(metric_name))
+            else:
+                LOGGER.error("Error while pushing metric: {}".format(e))
+                raise
\ No newline at end of file
diff --git a/src/kpi_value_writer/service/__init__.py b/src/kpi_value_writer/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/kpi_value_writer/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/kpi_value_writer/service/__main__.py b/src/kpi_value_writer/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa67540fb899781297d1235dc2e15bcbb2c38585
--- /dev/null
+++ b/src/kpi_value_writer/service/__main__.py
@@ -0,0 +1,51 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from kpi_value_writer.service.KpiValueWriter import KpiValueWriter
+from common.Settings import get_log_level
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level)
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.debug('Starting...')
+
+    grpc_service = KpiValueWriter()
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.debug('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.debug('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/kpi_value_writer/tests/test_kpi_value_writer.py b/src/kpi_value_writer/tests/test_kpi_value_writer.py
new file mode 100755
index 0000000000000000000000000000000000000000..572495d48d70cdc40c0ef6bb1efcf877e2a610ee
--- /dev/null
+++ b/src/kpi_value_writer/tests/test_kpi_value_writer.py
@@ -0,0 +1,52 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from kpi_value_writer.service.KpiValueWriter import KpiValueWriter
+from common.tools.kafka.Variables import KafkaTopic
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+from kpi_manager.tests.test_messages import create_kpi_descriptor_request
+from common.proto.kpi_manager_pb2 import KpiDescriptor
+from kpi_value_writer.tests.test_messages import create_kpi_id_request
+
+LOGGER = logging.getLogger(__name__)
+
+# def test_GetKpiDescriptor():
+#     LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ")
+#     kpi_manager_client = KpiManagerClient()
+#     # adding KPI
+#     LOGGER.info(" --->>> calling SetKpiDescriptor ")
+#     response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request())
+#     # get KPI
+#     LOGGER.info(" --->>> calling GetKpiDescriptor with response ID")
+#     response = kpi_manager_client.GetKpiDescriptor(response_id)
+#     LOGGER.info("Response gRPC message object: {:}".format(response))
+    
+#     LOGGER.info(" --->>> calling GetKpiDescriptor with random ID")
+#     rand_response = kpi_manager_client.GetKpiDescriptor(create_kpi_id_request())
+#     LOGGER.info("Response gRPC message object: {:}".format(rand_response))
+
+#     LOGGER.info("\n------------------ TEST FINISHED ---------------------\n")
+#     assert isinstance(response, KpiDescriptor)
+
+# -------- Initial Test ----------------
+def test_validate_kafka_topics():
+    LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ")
+    response = KafkaTopic.create_all_topics()
+    assert isinstance(response, bool)
+
+def test_KafkaConsumer():
+    LOGGER.debug(" --->>> test_kafka_consumer: START <<<--- ")
+    KpiValueWriter.RunKafkaConsumer()
+
diff --git a/src/kpi_value_writer/tests/test_messages.py b/src/kpi_value_writer/tests/test_messages.py
new file mode 100755
index 0000000000000000000000000000000000000000..89a41fa08ad37b7d9b305bba6e7c445fea5cd18a
--- /dev/null
+++ b/src/kpi_value_writer/tests/test_messages.py
@@ -0,0 +1,44 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid, time
+import random
+from common.proto import kpi_manager_pb2
+from common.proto.kpi_value_api_pb2 import KpiValue
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+
+def create_kpi_id_request():
+    _create_kpi_id = kpi_manager_pb2.KpiId()
+    _create_kpi_id.kpi_id.uuid = str(uuid.uuid4())
+    return _create_kpi_id
+
+def create_kpi_descriptor_request(description: str = "Test Description"):
+    _create_kpi_request                                    = kpi_manager_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_id.kpi_id.uuid                 = str(uuid.uuid4())
+    _create_kpi_request.kpi_description                    = description
+    _create_kpi_request.kpi_sample_type                    = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    _create_kpi_request.device_id.device_uuid.uuid         = 'DEV4'  
+    _create_kpi_request.service_id.service_uuid.uuid       = 'SERV3' 
+    _create_kpi_request.slice_id.slice_uuid.uuid           = 'SLC3'  
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid     = 'END2'  
+    _create_kpi_request.connection_id.connection_uuid.uuid = 'CON2'  
+    _create_kpi_request.link_id.link_uuid.uuid             = 'LNK2'  
+    return _create_kpi_request
+
+def create_kpi_value_request():
+    _create_kpi_value_request                         = KpiValue()
+    _create_kpi_value_request.kpi_id.kpi_id.uuid      = str(uuid.uuid4())
+    _create_kpi_value_request.timestamp.timestamp     = time.time()
+    _create_kpi_value_request.kpi_value_type.floatVal = random.randint(10, 10000)
+    return _create_kpi_value_request
diff --git a/src/kpi_value_writer/tests/test_metric_writer_to_prom.py b/src/kpi_value_writer/tests/test_metric_writer_to_prom.py
new file mode 100644
index 0000000000000000000000000000000000000000..f60e96253ae8edb29eedcbe2d6e66aaeb450229c
--- /dev/null
+++ b/src/kpi_value_writer/tests/test_metric_writer_to_prom.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import threading
+import logging
+from kpi_value_writer.service.MetricWriterToPrometheus import MetricWriterToPrometheus
+from kpi_value_writer.tests.test_messages import create_kpi_descriptor_request, create_kpi_value_request
+
+LOGGER = logging.getLogger(__name__)
+
+def test_metric_writer_to_prometheus():
+    LOGGER.info(' >>> test_metric_writer_to_prometheus START <<< ')
+    metric_writer_obj = MetricWriterToPrometheus()
+    metric_writer_obj.create_and_expose_cooked_kpi(
+                        create_kpi_descriptor_request(),
+                        create_kpi_value_request()
+        )
diff --git a/src/l3_centralizedattackdetector/requirements.in b/src/l3_centralizedattackdetector/requirements.in
index 34513101381471027dbdbab4d3a615e057acd92e..14808cba5d26a479095ff112b505febff095bdcd 100644
--- a/src/l3_centralizedattackdetector/requirements.in
+++ b/src/l3_centralizedattackdetector/requirements.in
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-numpy==1.23.*
+numpy<2.0.0
 onnxruntime==1.12.*
 scikit-learn==1.1.*
diff --git a/src/l3_distributedattackdetector/requirements.in b/src/l3_distributedattackdetector/requirements.in
index 6deb8d906f733e25bfac07bbe82b536b4774f5bb..1d2fbafc26397ee41314686a202938d42c9a22c0 100644
--- a/src/l3_distributedattackdetector/requirements.in
+++ b/src/l3_distributedattackdetector/requirements.in
@@ -12,5 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-numpy==1.23.*
+numpy<2.0.0
 asyncio==3.4.3
diff --git a/src/monitoring/requirements.in b/src/monitoring/requirements.in
index 8684cb22350416818c20e881993a62d4f10a2e9e..3b67c00ee6056de089cde8d9b7faeef05d75336a 100644
--- a/src/monitoring/requirements.in
+++ b/src/monitoring/requirements.in
@@ -18,7 +18,7 @@ APScheduler==3.10.1
 #google-api-core
 #opencensus[stackdriver]
 #google-cloud-profiler
-#numpy
+numpy<2.0.0
 #Jinja2==3.0.3
 #ncclient==0.6.13
 #p4runtime==1.3.0
diff --git a/src/nbi/requirements.in b/src/nbi/requirements.in
index 78d941974c62e32251373a805056068608b0bda2..4c5460a8e2b3c05d994bbaba4bd2939e629db1e2 100644
--- a/src/nbi/requirements.in
+++ b/src/nbi/requirements.in
@@ -22,5 +22,6 @@ libyang==2.8.0
 netaddr==0.9.0
 pyang==2.6.0
 git+https://github.com/robshakir/pyangbind.git
+pydantic==2.6.3
 requests==2.27.1
 werkzeug==2.3.7
diff --git a/src/nbi/service/__main__.py b/src/nbi/service/__main__.py
index 362b0116d6f0bdbc4d1fa2025c09ac23c828617f..58fbb9625addc43c6b62d06d7a9caa3f648203d5 100644
--- a/src/nbi/service/__main__.py
+++ b/src/nbi/service/__main__.py
@@ -18,13 +18,16 @@ from common.Constants import ServiceNameEnum
 from common.Settings import (
     ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
     wait_for_environment_variables)
+
 from .NbiService import NbiService
 from .rest_server.RestServer import RestServer
 from .rest_server.nbi_plugins.etsi_bwm import register_etsi_bwm_api
+from .rest_server.nbi_plugins.ietf_hardware import register_ietf_hardware
 from .rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn
 from .rest_server.nbi_plugins.ietf_l3vpn import register_ietf_l3vpn
 from .rest_server.nbi_plugins.ietf_network import register_ietf_network
 from .rest_server.nbi_plugins.ietf_network_slice import register_ietf_nss
+from .rest_server.nbi_plugins.ietf_acl import register_ietf_acl
 from .rest_server.nbi_plugins.tfs_api import register_tfs_api
 
 terminate = threading.Event()
@@ -63,13 +66,23 @@ def main():
 
     rest_server = RestServer()
     register_etsi_bwm_api(rest_server)
+    register_ietf_hardware(rest_server)
     register_ietf_l2vpn(rest_server)  # Registering L2VPN entrypoint
     register_ietf_l3vpn(rest_server)  # Registering L3VPN entrypoint
     register_ietf_network(rest_server)
     register_ietf_nss(rest_server)  # Registering NSS entrypoint
+    register_ietf_acl(rest_server)
     register_tfs_api(rest_server)
     rest_server.start()
 
+    LOGGER.debug('Configured Resources:')
+    for resource in rest_server.api.resources:
+        LOGGER.debug(' - {:s}'.format(str(resource)))
+
+    LOGGER.debug('Configured Rules:')
+    for rule in rest_server.app.url_map.iter_rules():
+        LOGGER.debug(' - {:s}'.format(str(rule)))
+
     # Wait for Ctrl+C or termination signal
     while not terminate.wait(timeout=1.0): pass
 
diff --git a/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Resources.py b/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Resources.py
index 4c6ad47bc210316908ed3e3676abbda6757cf615..7f9360e00f5891b6cac0ae5020bd4fbc5ab7d9c1 100644
--- a/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Resources.py
+++ b/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Resources.py
@@ -13,12 +13,15 @@
 # limitations under the License.
 
 import copy, deepmerge, json, logging
+from typing import Dict
+from flask_restful import Resource, request
+from werkzeug.exceptions import UnsupportedMediaType
 from common.Constants import DEFAULT_CONTEXT_NAME
 from context.client.ContextClient import ContextClient
-from flask_restful import Resource, request
 from service.client.ServiceClient import ServiceClient
 from .Tools import (
-    format_grpc_to_json, grpc_context_id, grpc_service_id, bwInfo_2_service, service_2_bwInfo)
+    format_grpc_to_json, grpc_context_id, grpc_service_id, bwInfo_2_service, service_2_bwInfo
+)
 
 LOGGER = logging.getLogger(__name__)
 
@@ -37,15 +40,20 @@ class BwInfo(_Resource):
         return bw_allocations
 
     def post(self):
-        bwinfo = request.get_json()
-        service = bwInfo_2_service(self.client, bwinfo)
+        if not request.is_json:
+            raise UnsupportedMediaType('JSON payload is required')
+        request_data: Dict = request.get_json()
+        service = bwInfo_2_service(self.client, request_data)
         stripped_service = copy.deepcopy(service)
         stripped_service.ClearField('service_endpoint_ids')
         stripped_service.ClearField('service_constraints')
         stripped_service.ClearField('service_config')
 
-        response = format_grpc_to_json(self.service_client.CreateService(stripped_service))
-        response = format_grpc_to_json(self.service_client.UpdateService(service))
+        try:
+            response = format_grpc_to_json(self.service_client.CreateService(stripped_service))
+            response = format_grpc_to_json(self.service_client.UpdateService(service))
+        except Exception as e: # pylint: disable=broad-except
+            LOGGER.exception('Error while creating/updating the service')
+            return str(e), 500
 
         return response
 
diff --git a/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Tools.py b/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Tools.py
index 59436708cca2fcf7ff0ff65aa4977e2ccfaeda95..55efa48b12b61cb44c23fc2995679afe38351368 100644
--- a/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Tools.py
+++ b/src/nbi/service/rest_server/nbi_plugins/etsi_bwm/Tools.py
@@ -12,20 +12,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
-import logging
-import time
+import json, logging, re, time
 from decimal import ROUND_HALF_EVEN, Decimal
 from flask.json import jsonify
 from common.proto.context_pb2 import (
-    ContextId, Empty, EndPointId, ServiceId, ServiceTypeEnum, Service, Constraint, Constraint_SLA_Capacity,
-    ConfigRule, ConfigRule_Custom, ConfigActionEnum)
+    ContextId, Empty, EndPointId, ServiceId, ServiceStatusEnum, ServiceTypeEnum,
+    Service, Constraint, Constraint_SLA_Capacity, ConfigRule, ConfigRule_Custom,
+    ConfigActionEnum
+)
+from common.tools.grpc.ConfigRules import update_config_rule_custom
 from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.Context import json_context_id
 from common.tools.object_factory.Service import json_service_id
 
 LOGGER = logging.getLogger(__name__)
 
+ENDPOINT_SETTINGS_KEY = '/device[{:s}]/endpoint[{:s}]/vlan[{:d}]/settings'
+DEVICE_SETTINGS_KEY = '/device[{:s}]/settings'
+RE_CONFIG_RULE_IF_SUBIF = re.compile(r'^\/interface\[([^\]]+)\]\/subinterface\[([^\]]+)\]$')
+MEC_CONSIDERED_FIELDS = ['requestType', 'sessionFilter', 'fixedAllocation', 'allocationDirection', 'fixedBWPriority']
+ALLOCATION_DIRECTION_DESCRIPTIONS = {
+    '00' : 'Downlink (towards the UE)',
+    '01' : 'Uplink (towards the application/session)',
+    '10' : 'Symmetrical'}
+VLAN_TAG = 0
+PREFIX_LENGTH = 24
+BGP_AS = 65000
+POLICY_AZ = 'srv_{:d}_a'.format(VLAN_TAG)
+POLICY_ZA = 'srv_{:d}_b'.format(VLAN_TAG)
+BGP_NEIGHBOR_IP_A = '192.168.150.1'
+BGP_NEIGHBOR_IP_Z = '192.168.150.2'
+ROUTER_ID_A = '200.1.1.1'
+ROUTER_ID_Z = '200.1.1.2'
+ROUTE_DISTINGUISHER = '{:5d}:{:03d}'.format(BGP_AS, VLAN_TAG)
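+# e.g. ROUTE_DISTINGUISHER == '65000:000' for BGP_AS=65000 and VLAN_TAG=0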
 
 def service_2_bwInfo(service: Service) -> dict:
     response = {}
@@ -40,12 +59,19 @@ def service_2_bwInfo(service: Service) -> dict:
             break
 
     for config_rule in service.service_config.config_rules:
+        resource_value_json = json.loads(config_rule.custom.resource_value)
+        if config_rule.custom.resource_key != '/request':
+            continue
         for key in ['allocationDirection', 'fixedBWPriority', 'requestType', 'sourceIp', 'sourcePort', 'dstPort', 'protocol', 'sessionFilter']:
-            if config_rule.custom.resource_key == key:
-                if key != 'sessionFilter':
-                    response[key] = config_rule.custom.resource_value
-                else:
-                    response[key] = json.loads(config_rule.custom.resource_value)
+            if key not in resource_value_json: 
+                continue
+
+            if key == 'sessionFilter':
+                response[key] = [resource_value_json[key]]
+            elif key == 'requestType':
+                response[key] = str(resource_value_json[key])
+            else:
+                response[key] = resource_value_json[key]
 
     unixtime = time.time()
     response['timeStamp'] = { # Time stamp to indicate when the corresponding information elements are sent
@@ -55,47 +81,108 @@ def service_2_bwInfo(service: Service) -> dict:
 
     return response
 
-def bwInfo_2_service(client, bwInfo: dict) -> Service:
+def bwInfo_2_service(client, bw_info: dict) -> Service:
+    if 'sessionFilter' in bw_info:
+        bw_info['sessionFilter'] = bw_info['sessionFilter'][0] # discard all but the first item of the sessionFilter field
+
     service = Service()
-    for key in ['allocationDirection', 'fixedBWPriority', 'requestType', 'timeStamp', 'sessionFilter']:
-        if key not in bwInfo:
-            continue
-        config_rule = ConfigRule()
-        config_rule.action = ConfigActionEnum.CONFIGACTION_SET
-        config_rule_custom = ConfigRule_Custom()
-        config_rule_custom.resource_key  = key
-        if key != 'sessionFilter':
-            config_rule_custom.resource_value  = str(bwInfo[key])
-        else:
-            config_rule_custom.resource_value  = json.dumps(bwInfo[key])
-        config_rule.custom.CopyFrom(config_rule_custom)
-        service.service_config.config_rules.append(config_rule)
-
-    if 'sessionFilter' in bwInfo:
-        a_ip = bwInfo['sessionFilter'][0]['sourceIp']
-        z_ip = bwInfo['sessionFilter'][0]['dstAddress']
+    
+    service_config_rules = service.service_config.config_rules
+
+
+    request_cr_key = '/request'
+    request_cr_value = {k:bw_info[k] for k in MEC_CONSIDERED_FIELDS}
+
+    config_rule = ConfigRule()
+    config_rule.action = ConfigActionEnum.CONFIGACTION_SET
+    config_rule_custom = ConfigRule_Custom()
+    config_rule_custom.resource_key  = request_cr_key
+    config_rule_custom.resource_value = json.dumps(request_cr_value)
+    config_rule.custom.CopyFrom(config_rule_custom)
+    service_config_rules.append(config_rule)
+
+    if 'sessionFilter' in bw_info:
+        a_ip = bw_info['sessionFilter']['sourceIp']
+        z_ip = bw_info['sessionFilter']['dstAddress']
 
         devices = client.ListDevices(Empty()).devices
+        ip_interface_name_dict = {}
         for device in devices:
+            device_endpoint_uuids = {ep.name:ep.endpoint_id.endpoint_uuid.uuid for ep in device.device_endpoints}
+            skip_device = True
             for cr in device.device_config.config_rules:
-                if cr.WhichOneof('config_rule') == 'custom' and cr.custom.resource_key == '_connect/settings':
-                    for ep in json.loads(cr.custom.resource_value)['endpoints']:
-                        if 'ip' in ep and (ep['ip'] == a_ip or ep['ip'] == z_ip):
-                            ep_id = EndPointId()
-                            ep_id.endpoint_uuid.uuid = ep['uuid']
-                            ep_id.device_id.device_uuid.uuid = device.device_id.device_uuid.uuid
-                            service.service_endpoint_ids.append(ep_id)
-
+                if cr.WhichOneof('config_rule') != 'custom':
+                    continue
+                match_subif = RE_CONFIG_RULE_IF_SUBIF.match(cr.custom.resource_key)
+                if not match_subif:
+                    continue
+                address_ip = json.loads(cr.custom.resource_value).get('address_ip')
+                short_port_name = match_subif.groups(0)[0]
+                ip_interface_name_dict[address_ip] = short_port_name
+                if address_ip not in [a_ip, z_ip]:
+                    continue
+                port_name = 'PORT-' + short_port_name # `PORT-` added as prefix
+                ep_id = EndPointId()
+                ep_id.endpoint_uuid.uuid = device_endpoint_uuids[port_name]
+                ep_id.device_id.device_uuid.uuid = device.device_id.device_uuid.uuid
+                service.service_endpoint_ids.append(ep_id)
+                # add interface config rules
+                endpoint_settings_key = ENDPOINT_SETTINGS_KEY.format(device.name, port_name, VLAN_TAG)
+                if address_ip == a_ip:
+                    router_id = ROUTER_ID_A
+                    policy_az = POLICY_AZ
+                    policy_za = POLICY_ZA
+                    neighbor_bgp_interface_address_ip = BGP_NEIGHBOR_IP_Z
+                    self_bgp_interface_address_ip = BGP_NEIGHBOR_IP_A
+                else:
+                    router_id = ROUTER_ID_Z
+                    policy_az = POLICY_ZA
+                    policy_za = POLICY_AZ
+                    neighbor_bgp_interface_address_ip = BGP_NEIGHBOR_IP_A
+                    self_bgp_interface_address_ip = BGP_NEIGHBOR_IP_Z
+                endpoint_field_updates = {
+                    'address_ip'         : (address_ip, True),
+                    'address_prefix'     : (PREFIX_LENGTH, True),
+                    'sub_interface_index': (0, True),
+                }
+                LOGGER.debug(f'BEFORE UPDATE -> service_config_rules: {service_config_rules}')
+                update_config_rule_custom(service_config_rules, endpoint_settings_key, endpoint_field_updates)
+                LOGGER.debug(f'AFTER UPDATE -> service_config_rules: {service_config_rules}')
+                skip_device = False
+            if skip_device:
+                continue
+            device_field_updates = {
+                'bgp_as'                           : (BGP_AS, True),
+                'route_distinguisher'              : (ROUTE_DISTINGUISHER, True),
+                'router_id'                        : (router_id, True),
+                'policy_AZ'                        : (policy_az, True),
+                'policy_ZA'                        : (policy_za, True),
+                'neighbor_bgp_interface_address_ip': (neighbor_bgp_interface_address_ip, True),
+                'self_bgp_interface_name'          : (ip_interface_name_dict[self_bgp_interface_address_ip], True),
+                'self_bgp_interface_address_ip'    : (self_bgp_interface_address_ip, True),
+                'bgp_interface_address_prefix'     : (PREFIX_LENGTH, True),
+            }
+            device_settings_key = DEVICE_SETTINGS_KEY.format(device.name)
+            LOGGER.debug(f'BEFORE UPDATE -> service_config_rules: {service_config_rules}')
+            update_config_rule_custom(service_config_rules, device_settings_key, device_field_updates)
+            LOGGER.debug(f'AFTER UPDATE -> service_config_rules: {service_config_rules}')
+
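+    # Create/refresh the service-level '/settings' config rule (no field
+    # overrides are set here).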
+    settings_cr_key = '/settings'
+    settings_cr_value = {}
+    update_config_rule_custom(service_config_rules, settings_cr_key, settings_cr_value)
+
+    service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
     service.service_type = ServiceTypeEnum.SERVICETYPE_L3NM
 
-    if 'appInsId' in bwInfo:
-        service.service_id.service_uuid.uuid = bwInfo['appInsId']
+    if 'appInsId' in bw_info:
+        service.service_id.service_uuid.uuid = bw_info['appInsId']
         service.service_id.context_id.context_uuid.uuid = 'admin'
-        service.name = bwInfo['appInsId']
+        service.name = bw_info['appInsId']
 
-    if 'fixedAllocation' in bwInfo:
+    if 'fixedAllocation' in bw_info:
         capacity = Constraint_SLA_Capacity()
-        capacity.capacity_gbps = float(bwInfo['fixedAllocation']) / 1.e9
+        capacity.capacity_gbps = float(bw_info['fixedAllocation']) / 1.e9
         constraint = Constraint()
         constraint.sla_capacity.CopyFrom(capacity)
         service.service_constraints.append(constraint)
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acl.py b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acl.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e2f1389e6786a5cef322ecfaf64c12112409619
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acl.py
@@ -0,0 +1,75 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, re
+from flask_restful import Resource
+from werkzeug.exceptions import NotFound
+from common.proto.context_pb2 import ConfigActionEnum, ConfigRule
+from common.tools.context_queries.Device import get_device
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from nbi.service.rest_server.nbi_plugins.tools.Authentication import HTTP_AUTH
+from .ietf_acl_parser import ietf_acl_from_config_rule_resource_value
+
+LOGGER = logging.getLogger(__name__)
+
+ACL_CONFIG_RULE_KEY = r'/device\[.+\]/endpoint\[(.+)\]/acl_ruleset\[{}\]'
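+# Illustrative resource_key this pattern is meant to match (names are examples):
+#   /device[R1]/endpoint[eth-1/0/1]/acl_ruleset[sample-ipv4-acl]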
+
+class Acl(Resource):
+    @HTTP_AUTH.login_required
+    def get(self, device_uuid : str, acl_name : str):
+        LOGGER.debug('GET device_uuid={:s}, acl_name={:s}'.format(str(device_uuid), str(acl_name)))
+        RE_ACL_CONFIG_RULE_KEY = re.compile(ACL_CONFIG_RULE_KEY.format(acl_name))
+
+        context_client = ContextClient()
+        device = get_device(context_client, device_uuid, rw_copy=False, include_config_rules=True)
+        if device is None: raise NotFound('Device({:s}) not found'.format(str(device_uuid)))
+
+        for config_rule in device.device_config.config_rules:
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            ep_uuid_match = RE_ACL_CONFIG_RULE_KEY.match(config_rule.custom.resource_key)
+            if ep_uuid_match is None: continue
+            resource_value_dict = json.loads(config_rule.custom.resource_value)
+            return ietf_acl_from_config_rule_resource_value(resource_value_dict)
+
+        raise NotFound('Acl({:s}) not found in Device({:s})'.format(str(acl_name), str(device_uuid)))
+
+    @HTTP_AUTH.login_required
+    def delete(self, device_uuid : str, acl_name : str):
+        LOGGER.debug('DELETE device_uuid={:s}, acl_name={:s}'.format(str(device_uuid), str(acl_name)))
+        RE_ACL_CONFIG_RULE_KEY = re.compile(ACL_CONFIG_RULE_KEY.format(acl_name))
+
+        context_client = ContextClient()
+        device = get_device(context_client, device_uuid, rw_copy=True, include_config_rules=True)
+        if device is None: raise NotFound('Device({:s}) not found'.format(str(device_uuid)))
+
+        delete_config_rules = list()
+        for config_rule in device.device_config.config_rules:
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            ep_uuid_match = RE_ACL_CONFIG_RULE_KEY.match(config_rule.custom.resource_key)
+            if ep_uuid_match is None: continue
+
+            _config_rule = ConfigRule()
+            _config_rule.CopyFrom(config_rule)
+            _config_rule.action = ConfigActionEnum.CONFIGACTION_DELETE
+            delete_config_rules.append(_config_rule)
+
+        if len(delete_config_rules) == 0:
+            raise NotFound('Acl({:s}) not found in Device({:s})'.format(str(acl_name), str(device_uuid)))
+
+        device_client = DeviceClient()
+        del device.device_config.config_rules[:]
+        device.device_config.config_rules.extend(delete_config_rules)
+        device_client.ConfigureDevice(device)
+        return None
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acls.py b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acls.py
new file mode 100644
index 0000000000000000000000000000000000000000..1814abbb415cfbaee205ff7880fb299e70b5dba1
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/Acls.py
@@ -0,0 +1,131 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging
+from typing import Dict, List, Set
+from flask import jsonify, request
+from flask_restful import Resource
+from werkzeug.exceptions import BadRequest, NotFound, UnsupportedMediaType
+from common.proto.context_pb2 import ConfigRule
+from common.tools.context_queries.Device import get_device
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from nbi.service.rest_server.nbi_plugins.tools.Authentication import HTTP_AUTH
+from .ietf_acl_parser import AclDirectionEnum, config_rule_from_ietf_acl
+from .YangValidator import YangValidator
+
+LOGGER = logging.getLogger(__name__)
+
+
+def compose_interface_direction_acl_rules(
+    device_name : str, interface_name : str, interface_data : Dict,
+    acl_direction : AclDirectionEnum, acl_name__to__acl_data : Dict[str, Dict]
+) -> List[ConfigRule]:
+    acl_direction_name  = acl_direction.value
+    acl_direction_title = str(acl_direction_name).title()
+    direction_data : Dict[str, Dict] = interface_data.get(acl_direction_name, {})
+    acl_sets       : Dict[str, Dict] = direction_data.get('acl-sets',         {})
+    acl_set_list   : List[Dict]      = acl_sets      .get('acl-set',          [])
+    acl_set_names  : Set[str]        = {acl_set['name'] for acl_set in acl_set_list}
+
+    acl_config_rules : List[ConfigRule] = list()
+    for acl_set_name in acl_set_names:
+        acl_set = acl_name__to__acl_data.get(acl_set_name)
+        if acl_set is None:
+            MSG = 'Interface({:s})/{:s}/AclSet({:s}) not found'
+            raise NotFound(MSG.format(
+                str(interface_name), acl_direction_title,
+                str(acl_set_name)
+            ))
+
+        acl_config_rule = config_rule_from_ietf_acl(
+            device_name, interface_name, acl_set
+        )
+        MSG = 'Adding {:s} ACL Config Rule: {:s}'
+        LOGGER.info(MSG.format(
+            acl_direction_title, grpc_message_to_json_string(acl_config_rule)
+        ))
+        acl_config_rules.append(acl_config_rule)
+
+    return acl_config_rules
+
+class Acls(Resource):
+    @HTTP_AUTH.login_required
+    def get(self):
+        return {}
+
+    @HTTP_AUTH.login_required
+    def post(self, device_uuid : str):
+        if not request.is_json:
+            LOGGER.warning('POST device_uuid={:s}, body={:s}'.format(str(device_uuid), str(request.data)))
+            raise UnsupportedMediaType('JSON payload is required')
+        request_data : Dict = request.json
+        LOGGER.debug('POST device_uuid={:s}, body={:s}'.format(str(device_uuid), json.dumps(request_data)))
+
+        context_client = ContextClient()
+        device = get_device(
+            context_client, device_uuid, rw_copy=True, include_config_rules=False, include_components=False
+        )
+        if device is None:
+            raise NotFound('Device({:s}) not found'.format(str(device_uuid)))
+
+        device_name = device.name
+        interface_names : Set[str] = set()
+        for endpoint in device.device_endpoints:
+            interface_names.add(endpoint.endpoint_id.endpoint_uuid.uuid)
+            interface_names.add(endpoint.name)
+
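+        # Validate the payload against the IETF ACL YANG model; parse_to_dict
+        # injects synthetic ietf-interfaces entries for the endpoint names above
+        # so attachment-point interface references can resolve during validation.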
+        yang_validator = YangValidator()
+        request_data = yang_validator.parse_to_dict(request_data, list(interface_names))
+        yang_validator.destroy()
+
+        acls          : Dict = request_data.get('acls', {})
+        acl_list      : List = acls.get('acl', [])
+        acl_name__to__acl_data = {
+            acl['name'] : acl
+            for acl in acl_list
+        }
+
+        if len(acl_name__to__acl_data) == 0:
+            raise BadRequest('No ACLs defined in the request')
+
+        interface_list : List = acls.get('attachment-points', {}).get('interface', [])
+        interface_name__to__interface_data = {
+            interface['interface-id'] : interface
+            for interface in interface_list
+        }
+
+        if len(interface_name__to__interface_data) == 0:
+            raise BadRequest('No interfaces defined in the request')
+
+        for interface_name in interface_names:
+            interface_data = interface_name__to__interface_data.get(interface_name)
+            if interface_data is None: continue
+
+            ingress_acl_config_rules = compose_interface_direction_acl_rules(
+                device_name, interface_name, interface_data, AclDirectionEnum.INGRESS,
+                acl_name__to__acl_data
+            )
+            device.device_config.config_rules.extend(ingress_acl_config_rules)
+
+            egress_acl_config_rules = compose_interface_direction_acl_rules(
+                device_name, interface_name, interface_data, AclDirectionEnum.EGRESS,
+                acl_name__to__acl_data
+            )
+            device.device_config.config_rules.extend(egress_acl_config_rules)
+
+        device_client = DeviceClient()
+        device_client.ConfigureDevice(device)
+        return jsonify({})
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/YangValidator.py b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/YangValidator.py
new file mode 100644
index 0000000000000000000000000000000000000000..56bf9b30c1bc2ab6a36a3d59519b544cd3c00ef3
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/YangValidator.py
@@ -0,0 +1,111 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, json, libyang, logging, os
+from typing import Dict, List, Optional
+
+LOGGER = logging.getLogger(__name__)
+
+YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang')
+YANG_MODULES = [
+    'ietf-yang-types',
+    'ietf-interfaces',
+    'iana-if-type',
+    'ietf-access-control-list',
+]
+
+class YangValidator:
+    def __init__(self) -> None:
+        self._yang_context = libyang.Context(YANG_DIR)
+        for module_name in YANG_MODULES:
+            LOGGER.info('Loading module: {:s}'.format(str(module_name)))
+            yang_module = self._yang_context.load_module(module_name)
+            yang_module.feature_enable_all()
+
+    def parse_to_dict(self, message : Dict, interface_names : List[str]) -> Dict:
+        LOGGER.debug('[parse_to_dict] message={:s}'.format(json.dumps(message)))
+        LOGGER.debug('[parse_to_dict] interface_names={:s}'.format(json.dumps(interface_names)))
+
+        # Inject synthetic interfaces for validation purposes
+        interfaces = self._yang_context.create_data_path('/ietf-interfaces:interfaces')
+        for if_index,interface_name in enumerate(interface_names):
+            if_path = 'interface[name="{:s}"]'.format(str(interface_name))
+            interface = interfaces.create_path(if_path)
+            interface.create_path('if-index', if_index + 1)
+            interface.create_path('type', 'iana-if-type:ethernetCsmacd')
+            interface.create_path('admin-status', 'up')
+            interface.create_path('oper-status', 'up')
+            statistics = interface.create_path('statistics')
+            statistics.create_path('discontinuity-time', '2024-07-11T10:00:00.000000Z')
+
+        extended_message = copy.deepcopy(message)
+        extended_message['ietf-interfaces:interfaces'] = interfaces.print_dict()['interfaces']
+        LOGGER.debug('[parse_to_dict] extended_message={:s}'.format(json.dumps(extended_message)))
+
+        dnode : Optional[libyang.DNode] = self._yang_context.parse_data_mem(
+            json.dumps(extended_message), 'json', validate_present=True, strict=True
+        )
+        if dnode is None:
+            LOGGER.error('[parse_to_dict] unable to parse message')
+            raise Exception('Unable to parse Message({:s})'.format(str(message)))
+        message_dict = dnode.print_dict()
+        LOGGER.debug('[parse_to_dict] message_dict={:s}'.format(json.dumps(message_dict)))
+
+        dnode.free()
+        interfaces.free()
+        return message_dict
+
+    def destroy(self) -> None:
+        self._yang_context.destroy()
+        self._yang_context = None
+
+def main() -> None:
+    import uuid # pylint: disable=import-outside-toplevel
+    logging.basicConfig(level=logging.DEBUG)
+
+    interface_names = {'200', '500', str(uuid.uuid4()), str(uuid.uuid4())}
+    ACL_RULE = {"ietf-access-control-list:acls": {
+        "acl": [{
+            "name": "sample-ipv4-acl", "type": "ipv4-acl-type",
+            "aces": {"ace": [{
+                "name": "rule1",
+                "matches": {
+                    "ipv4": {
+                        "source-ipv4-network": "128.32.10.6/24",
+                        "destination-ipv4-network": "172.10.33.0/24",
+                        "dscp": 18
+                    },
+                    "tcp": {
+                        "source-port": {"operator": "eq", "port": 1444},
+                        "destination-port": {"operator": "eq", "port": 1333},
+                        "flags": "syn"
+                    }
+                },
+                "actions": {"forwarding": "drop"}
+            }]}
+        }],
+        "attachment-points": {"interface": [{
+            "interface-id": "200",
+            "ingress": {"acl-sets": {"acl-set": [{"name": "sample-ipv4-acl"}]}}
+        }]
+    }}}
+
+    yang_validator = YangValidator()
+    request_data = yang_validator.parse_to_dict(ACL_RULE, list(interface_names))
+    yang_validator.destroy()
+
+    LOGGER.info('request_data = {:s}'.format(str(request_data)))
+
+if __name__ == '__main__':
+    main()
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/__init__.py b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3538b24ba56b2a6011b76b3878c4bef690fe1fc8
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/__init__.py
@@ -0,0 +1,38 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from flask_restful import Resource
+from nbi.service.rest_server.RestServer import RestServer
+from .Acl import Acl
+from .Acls import Acls
+
+URL_PREFIX = '/restconf/data'
+
+def __add_resource(rest_server: RestServer, resource: Resource, *urls, **kwargs):
+    urls = [(URL_PREFIX + url) for url in urls]
+    rest_server.add_resource(resource, *urls, **kwargs)
+
+def register_ietf_acl(rest_server: RestServer):
+    __add_resource(
+        rest_server,
+        Acls,
+        '/device=<path:device_uuid>/ietf-access-control-list:acls',
+    )
+
+    __add_resource(
+        rest_server,
+        Acl,
+        '/device=<path:device_uuid>/ietf-access-control-list:acl=<path:acl_name>',
+        '/device=<path:device_uuid>/ietf-access-control-list:acl=<path:acl_name>/',
+    )
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/ietf_acl_parser.py b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/ietf_acl_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..085d680d177d2f48d41c1160c3a70b6c7c4209cb
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/ietf_acl_parser.py
@@ -0,0 +1,257 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+from typing import List, Dict
+from pydantic import BaseModel, Field
+from werkzeug.exceptions import NotImplemented
+from common.proto.acl_pb2 import AclForwardActionEnum, AclRuleTypeEnum, AclEntry
+from common.proto.context_pb2 import ConfigActionEnum, ConfigRule
+
+class AclDirectionEnum(Enum):
+    INGRESS = 'ingress'
+    EGRESS  = 'egress'
+
+class Ipv4(BaseModel):
+    dscp: int = 0
+    source_ipv4_network: str = Field(serialization_alias="source-ipv4-network", default="")
+    destination_ipv4_network: str = Field(serialization_alias="destination-ipv4-network", default="")
+
+class Port(BaseModel):
+    port: int = 0
+    operator: str = "eq"
+
+class Tcp(BaseModel):
+    flags: str = ""
+    source_port: Port = Field(serialization_alias="source-port", default_factory=lambda: Port())
+    destination_port: Port = Field(serialization_alias="destination-port", default_factory=lambda: Port())
+
+class Matches(BaseModel):
+    ipv4: Ipv4 = Ipv4()
+    tcp: Tcp = Tcp()
+
+class Action(BaseModel):
+    forwarding: str = ""
+
+class Ace(BaseModel):
+    name: str = "custom_rule"
+    matches: Matches = Matches()
+    actions: Action = Action()
+
+class Aces(BaseModel):
+    ace: List[Ace] = [Ace()]
+
+class Acl(BaseModel):
+    name: str = ""
+    type: str = ""
+    aces: Aces = Aces()
+
+class Name(BaseModel):
+    name: str = ""
+
+class AclSet(BaseModel):
+    acl_set: List[Name] = Field(serialization_alias="acl-set", default=[Name()])
+
+class AclSets(BaseModel):
+    acl_sets: AclSet = Field(serialization_alias="acl-sets", default=AclSet())
+
+class Ingress(BaseModel):
+    ingress : AclSets = AclSets()
+
+class Egress(BaseModel):
+    egress : AclSets = AclSets()
+
+class Interface(BaseModel):
+    interface_id: str = Field(serialization_alias="interface-id", default="")
+    ingress : Ingress = Ingress()
+    egress  : Egress  = Egress()
+
+class Interfaces(BaseModel):
+    interface: List[Interface] = [Interface()]
+
+class AttachmentPoints(BaseModel):
+    attachment_points: Interfaces = Field(serialization_alias="attachment-points", default=Interfaces())
+
+class Acls(BaseModel):
+    acl: List[Acl] = [Acl()]
+    attachment_points: AttachmentPoints = Field(serialization_alias="attachment-points", default=AttachmentPoints())
+
+class IETF_ACL(BaseModel):
+    acls: Acls = Acls()
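+
+# Note: these models are dumped with model_dump(by_alias=True) below, so the
+# snake_case fields render under their IETF hyphenated aliases, e.g.
+# (illustrative): {"acls": {"acl": [...], "attachment-points": {...}}}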
+
+
+IETF_TFS_RULE_TYPE_MAPPING = {
+    "ipv4-acl-type": "ACLRULETYPE_IPV4",
+    "ipv6-acl-type": "ACLRULETYPE_IPV6",
+}
+
+IETF_TFS_FORWARDING_ACTION_MAPPING = {
+    "accept": "ACLFORWARDINGACTION_ACCEPT",
+    "drop"  : "ACLFORWARDINGACTION_DROP",
+}
+
+TFS_IETF_RULE_TYPE_MAPPING = {
+    "ACLRULETYPE_IPV4": "ipv4-acl-type",
+    "ACLRULETYPE_IPV6": "ipv6-acl-type",
+}
+
+TFS_IETF_FORWARDING_ACTION_MAPPING = {
+    "ACLFORWARDINGACTION_ACCEPT": "accept",
+    "ACLFORWARDINGACTION_DROP"  : "drop",
+}
+
+def config_rule_from_ietf_acl(
+    device_name : str, endpoint_name : str, acl_set_data : Dict
+) -> ConfigRule:
+    acl_config_rule = ConfigRule()
+    acl_config_rule.action = ConfigActionEnum.CONFIGACTION_SET
+    acl_endpoint_id = acl_config_rule.acl.endpoint_id
+    acl_endpoint_id.device_id.device_uuid.uuid = device_name
+    acl_endpoint_id.endpoint_uuid.uuid = endpoint_name
+
+    acl_name = acl_set_data['name']
+    acl_type = acl_set_data['type']
+    if acl_type.startswith('ietf-access-control-list:'):
+        acl_type = acl_type.replace('ietf-access-control-list:', '')
+    acl_type = getattr(AclRuleTypeEnum, IETF_TFS_RULE_TYPE_MAPPING[acl_type])
+
+    acl_rule_set = acl_config_rule.acl.rule_set
+    acl_rule_set.name = acl_name
+    acl_rule_set.type = acl_type
+    #acl_rule_set.description = ...
+
+    access_control_entry_list = acl_set_data.get('aces', {}).get('ace', [])
+    for sequence_id,ace in enumerate(access_control_entry_list):
+        ace_name    = ace['name']
+        ace_matches = ace.get('matches', {})
+        ace_actions = ace.get('actions', {})
+
+        acl_entry = AclEntry()
+        acl_entry.sequence_id = sequence_id + 1
+        #acl_entry.description = ...
+
+        if 'ipv4' in ace_matches:
+            ipv4_data = ace_matches['ipv4']
+            if 'source-ipv4-network' in ipv4_data:
+                acl_entry.match.src_address = ipv4_data['source-ipv4-network']
+            if 'destination-ipv4-network' in ipv4_data:
+                acl_entry.match.dst_address = ipv4_data['destination-ipv4-network']
+            if 'dscp' in ipv4_data:
+                acl_entry.match.dscp = ipv4_data['dscp']
+            if 'protocol' in ipv4_data:
+                acl_entry.match.protocol = ipv4_data['protocol']
+
+        if 'tcp' in ace_matches:
+            # https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+            acl_entry.match.protocol = 6
+            tcp_data = ace_matches['tcp']
+            if 'source-port' in tcp_data:
+                tcp_src_port : Dict = tcp_data['source-port']
+                tcp_src_port_op = tcp_src_port.get('operator', 'eq')
+                if tcp_src_port_op != 'eq':
+                    MSG = 'Acl({:s})/Ace({:s})/Match/Tcp({:s}) operator not supported'
+                    raise NotImplemented(MSG.format(acl_name, ace_name, str(tcp_data)))
+                acl_entry.match.src_port = tcp_src_port['port']
+            if 'destination-port' in tcp_data:
+                tcp_dst_port : Dict = tcp_data['destination-port']
+                tcp_dst_port_op = tcp_dst_port.get('operator', 'eq')
+                if tcp_dst_port_op != 'eq':
+                    MSG = 'Acl({:s})/Ace({:s})/Match/Tcp({:s}) operator not supported'
+                    raise NotImplemented(MSG.format(acl_name, ace_name, str(tcp_data)))
+                acl_entry.match.dst_port = tcp_dst_port['port']
+            if 'flags' in tcp_data:
+                acl_entry.match.tcp_flags = tcp_data['flags']
+
+        if 'udp' in ace_matches:
+            # https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+            acl_entry.match.protocol = 17
+            udp_data = ace_matches['udp']
+            if 'source-port' in udp_data:
+                udp_src_port : Dict = udp_data['source-port']
+                udp_src_port_op = udp_src_port.get('operator', 'eq')
+                if udp_src_port_op != 'eq':
+                    MSG = 'Acl({:s})/Ace({:s})/Match/Udp({:s}) operator not supported'
+                    raise NotImplemented(MSG.format(acl_name, ace_name, str(udp_data)))
+                acl_entry.match.src_port = udp_src_port['port']
+            if 'destination-port' in udp_data:
+                udp_dst_port : Dict = udp_data['destination-port']
+                udp_dst_port_op = udp_dst_port.get('operator', 'eq')
+                if udp_dst_port_op != 'eq':
+                    MSG = 'Acl({:s})/Ace({:s})/Match/Udp({:s}) operator not supported'
+                    raise NotImplemented(MSG.format(acl_name, ace_name, str(udp_data)))
+                acl_entry.match.dst_port = udp_dst_port['port']
+
+        if 'forwarding' in ace_actions:
+            ace_forward_action = ace_actions['forwarding']
+            if ace_forward_action.startswith('ietf-access-control-list:'):
+                ace_forward_action = ace_forward_action.replace('ietf-access-control-list:', '')
+            ace_forward_action = IETF_TFS_FORWARDING_ACTION_MAPPING[ace_forward_action]
+            acl_entry.action.forward_action = getattr(AclForwardActionEnum, ace_forward_action)
+
+        acl_rule_set.entries.append(acl_entry)
+
+    return acl_config_rule
+
+def ietf_acl_from_config_rule_resource_value(config_rule_rv: Dict) -> Dict:
+    rule_set = config_rule_rv['rule_set']
+    acl_entry = rule_set['entries'][0]
+    match_ = acl_entry['match']
+
+    ipv4 = Ipv4(
+        dscp=match_["dscp"],
+        source_ipv4_network=match_["src_address"],
+        destination_ipv4_network=match_["dst_address"]
+    )
+    tcp = Tcp(
+        flags=match_["tcp_flags"],
+        source_port=Port(port=match_["src_port"]),
+        destination_port=Port(port=match_["dst_port"])
+    )
+    matches = Matches(ipv4=ipv4, tcp=tcp)
+    aces = Aces(ace=[
+        Ace(
+            matches=matches,
+            actions=Action(
+                forwarding=TFS_IETF_FORWARDING_ACTION_MAPPING[acl_entry["action"]["forward_action"]]
+            )
+        )
+    ])
+    acl = Acl(
+        name=rule_set["name"],
+        type=TFS_IETF_RULE_TYPE_MAPPING[rule_set["type"]],
+        aces=aces
+    )
+    acl_sets = AclSets(
+        acl_sets=AclSet(
+            acl_set=[
+                Name(name=rule_set["name"])
+            ]
+        )
+    )
+    ingress = Ingress(ingress=acl_sets)
+    interfaces = Interfaces(interface=[
+        Interface(
+            interface_id=config_rule_rv["interface"],
+            ingress=ingress
+        )
+    ])
+    acls = Acls(
+        acl=[acl],
+        attachment_points=AttachmentPoints(
+            attachment_points=interfaces
+        )
+    )
+    ietf_acl = IETF_ACL(acls=acls)
+
+    return ietf_acl.model_dump(by_alias=True)
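+
+# Minimal usage sketch (hedged; values are illustrative, not from a live device):
+#   config_rule_rv = {
+#       'interface': 'eth-1/0/1',
+#       'rule_set': {'name': 'sample-ipv4-acl', 'type': 'ACLRULETYPE_IPV4', 'entries': [{
+#           'match': {'dscp': 18, 'src_address': '128.32.10.6/24',
+#                     'dst_address': '172.10.33.0/24', 'tcp_flags': 'syn',
+#                     'src_port': 1444, 'dst_port': 1333},
+#           'action': {'forward_action': 'ACLFORWARDINGACTION_DROP'}}]}}
+#   ietf_acl_from_config_rule_resource_value(config_rule_rv)  # -> by-alias IETF dict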
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/iana-if-type@2014-05-08.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/iana-if-type@2014-05-08.yang
new file mode 100644
index 0000000000000000000000000000000000000000..8d52d16f505074ed5c147b22f248bb2ceb89352a
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/iana-if-type@2014-05-08.yang
@@ -0,0 +1,1508 @@
+module iana-if-type {
+  namespace "urn:ietf:params:xml:ns:yang:iana-if-type";
+  prefix ianaift;
+
+  import ietf-interfaces {
+    prefix if;
+  }
+
+  organization "IANA";
+  contact
+    "        Internet Assigned Numbers Authority
+
+    Postal: ICANN
+            4676 Admiralty Way, Suite 330
+            Marina del Rey, CA 90292
+
+    Tel:    +1 310 823 9358
+    <mailto:iana@iana.org>";
+    
+  description
+    "This YANG module defines YANG identities for IANA-registered
+    interface types.
+
+    This YANG module is maintained by IANA and reflects the
+    'ifType definitions' registry.
+
+    The latest revision of this YANG module can be obtained from
+    the IANA web site.
+
+    Requests for new values should be made to IANA via
+    email (iana@iana.org).
+
+    Copyright (c) 2014 IETF Trust and the persons identified as
+    authors of the code.  All rights reserved.
+
+    Redistribution and use in source and binary forms, with or
+    without modification, is permitted pursuant to, and subject
+    to the license terms contained in, the Simplified BSD License
+    set forth in Section 4.c of the IETF Trust's Legal Provisions
+    Relating to IETF Documents
+    (http://trustee.ietf.org/license-info).
+
+    The initial version of this YANG module is part of RFC 7224;
+    see the RFC itself for full legal notices.";
+    reference
+      "IANA 'ifType definitions' registry.
+      <http://www.iana.org/assignments/smi-numbers>";
+
+  revision 2014-05-08 {
+    description
+      "Initial revision.";
+    reference
+      "RFC 7224: IANA Interface Type YANG Module";
+  }
+
+  identity iana-interface-type {
+    base if:interface-type;
+    description
+      "This identity is used as a base for all interface types
+      defined in the 'ifType definitions' registry.";
+  }
+
+  identity other {
+    base iana-interface-type;
+  }
+  identity regular1822 {
+    base iana-interface-type;
+  }
+  identity hdh1822 {
+    base iana-interface-type;
+  }
+  identity ddnX25 {
+    base iana-interface-type;
+  }
+  identity rfc877x25 {
+    base iana-interface-type;
+    reference
+      "RFC 1382 - SNMP MIB Extension for the X.25 Packet Layer";
+  }
+  identity ethernetCsmacd {
+    base iana-interface-type;
+    description
+      "For all Ethernet-like interfaces, regardless of speed,
+      as per RFC 3635.";
+    reference
+      "RFC 3635 - Definitions of Managed Objects for the
+                  Ethernet-like Interface Types";
+  }
+  identity iso88023Csmacd {
+    base iana-interface-type;
+    status deprecated;
+    description
+      "Deprecated via RFC 3635.
+      Use ethernetCsmacd(6) instead.";
+    reference
+      "RFC 3635 - Definitions of Managed Objects for the
+                  Ethernet-like Interface Types";
+  }
+  identity iso88024TokenBus {
+    base iana-interface-type;
+  }
+  identity iso88025TokenRing {
+    base iana-interface-type;
+  }
+  identity iso88026Man {
+    base iana-interface-type;
+  }
+  identity starLan {
+    base iana-interface-type;
+    status deprecated;
+    description
+      "Deprecated via RFC 3635.
+      Use ethernetCsmacd(6) instead.";
+    reference
+      "RFC 3635 - Definitions of Managed Objects for the
+                  Ethernet-like Interface Types";
+  }
+  identity proteon10Mbit {
+    base iana-interface-type;
+  }
+  identity proteon80Mbit {
+    base iana-interface-type;
+  }
+  identity hyperchannel {
+    base iana-interface-type;
+  }
+  identity fddi {
+    base iana-interface-type;
+    reference
+      "RFC 1512 - FDDI Management Information Base";
+  }
+  identity lapb {
+    base iana-interface-type;
+    reference
+      "RFC 1381 - SNMP MIB Extension for X.25 LAPB";
+  }
+  identity sdlc {
+    base iana-interface-type;
+  }
+  identity ds1 {
+    base iana-interface-type;
+    description
+      "DS1-MIB.";
+    reference
+      "RFC 4805 - Definitions of Managed Objects for the
+                  DS1, J1, E1, DS2, and E2 Interface Types";
+  }
+  identity e1 {
+    base iana-interface-type;
+    status obsolete;
+    description
+      "Obsolete; see DS1-MIB.";
+    reference
+      "RFC 4805 - Definitions of Managed Objects for the
+                  DS1, J1, E1, DS2, and E2 Interface Types";
+  }
+  identity basicISDN {
+    base iana-interface-type;
+    description
+      "No longer used.  See also RFC 2127.";
+  }
+  identity primaryISDN {
+    base iana-interface-type;
+    description
+      "No longer used.  See also RFC 2127.";
+  }
+  identity propPointToPointSerial {
+    base iana-interface-type;
+    description
+      "Proprietary serial.";
+  }
+  identity ppp {
+    base iana-interface-type;
+  }
+  identity softwareLoopback {
+    base iana-interface-type;
+  }
+  identity eon {
+    base iana-interface-type;
+    description
+      "CLNP over IP.";
+  }
+  identity ethernet3Mbit {
+    base iana-interface-type;
+  }
+  identity nsip {
+    base iana-interface-type;
+    description
+      "XNS over IP.";
+  }
+  identity slip {
+    base iana-interface-type;
+    description
+      "Generic SLIP.";
+  }
+  identity ultra {
+    base iana-interface-type;
+    description
+      "Ultra Technologies.";
+  }
+  identity ds3 {
+    base iana-interface-type;
+    description
+      "DS3-MIB.";
+    reference
+      "RFC 3896 - Definitions of Managed Objects for the
+                  DS3/E3 Interface Type";
+  }
+  identity sip {
+    base iana-interface-type;
+    description
+      "SMDS, coffee.";
+    reference
+      "RFC 1694 - Definitions of Managed Objects for SMDS
+                  Interfaces using SMIv2";
+  }
+  identity frameRelay {
+    base iana-interface-type;
+    description
+      "DTE only.";
+    reference
+      "RFC 2115 - Management Information Base for Frame Relay
+                  DTEs Using SMIv2";
+  }
+  identity rs232 {
+    base iana-interface-type;
+    reference
+      "RFC 1659 - Definitions of Managed Objects for RS-232-like
+                  Hardware Devices using SMIv2";
+  }
+  identity para {
+    base iana-interface-type;
+    description
+      "Parallel-port.";
+    reference
+      "RFC 1660 - Definitions of Managed Objects for
+                  Parallel-printer-like Hardware Devices using
+                  SMIv2";
+  }
+  identity arcnet {
+    base iana-interface-type;
+    description
+      "ARCnet.";
+  }
+  identity arcnetPlus {
+    base iana-interface-type;
+    description
+      "ARCnet Plus.";
+  }
+  identity atm {
+    base iana-interface-type;
+    description
+      "ATM cells.";
+  }
+  identity miox25 {
+    base iana-interface-type;
+    reference
+      "RFC 1461 - SNMP MIB extension for Multiprotocol
+                  Interconnect over X.25";
+  }
+  identity sonet {
+    base iana-interface-type;
+    description
+      "SONET or SDH.";
+  }
+  identity x25ple {
+    base iana-interface-type;
+    reference
+      "RFC 2127 - ISDN Management Information Base using SMIv2";
+  }
+  identity iso88022llc {
+    base iana-interface-type;
+  }
+  identity localTalk {
+    base iana-interface-type;
+  }
+  identity smdsDxi {
+    base iana-interface-type;
+  }
+  identity frameRelayService {
+    base iana-interface-type;
+    description
+      "FRNETSERV-MIB.";
+    reference
+      "RFC 2954 - Definitions of Managed Objects for Frame
+                  Relay Service";
+  }
+  identity v35 {
+    base iana-interface-type;
+  }
+  identity hssi {
+    base iana-interface-type;
+  }
+  identity hippi {
+    base iana-interface-type;
+  }
+  identity modem {
+    base iana-interface-type;
+    description
+      "Generic modem.";
+  }
+  identity aal5 {
+    base iana-interface-type;
+    description
+      "AAL5 over ATM.";
+  }
+  identity sonetPath {
+    base iana-interface-type;
+  }
+  identity sonetVT {
+    base iana-interface-type;
+  }
+  identity smdsIcip {
+    base iana-interface-type;
+    description
+      "SMDS InterCarrier Interface.";
+  }
+  identity propVirtual {
+    base iana-interface-type;
+    description
+      "Proprietary virtual/internal.";
+    reference
+      "RFC 2863 - The Interfaces Group MIB";
+  }
+  identity propMultiplexor {
+    base iana-interface-type;
+    description
+      "Proprietary multiplexing.";
+    reference
+      "RFC 2863 - The Interfaces Group MIB";
+  }
+  identity ieee80212 {
+    base iana-interface-type;
+    description
+      "100BaseVG.";
+  }
+  identity fibreChannel {
+    base iana-interface-type;
+    description
+      "Fibre Channel.";
+  }
+  identity hippiInterface {
+    base iana-interface-type;
+    description
+      "HIPPI interfaces.";
+  }
+  identity frameRelayInterconnect {
+    base iana-interface-type;
+    status obsolete;
+    description
+      "Obsolete; use either
+      frameRelay(32) or frameRelayService(44).";
+  }
+  identity aflane8023 {
+    base iana-interface-type;
+    description
+      "ATM Emulated LAN for 802.3.";
+  }
+  identity aflane8025 {
+    base iana-interface-type;
+    description
+      "ATM Emulated LAN for 802.5.";
+  }
+  identity cctEmul {
+    base iana-interface-type;
+    description
+      "ATM Emulated circuit.";
+  }
+  identity fastEther {
+    base iana-interface-type;
+    status deprecated;
+    description
+      "Obsoleted via RFC 3635.
+      ethernetCsmacd(6) should be used instead.";
+    reference
+      "RFC 3635 - Definitions of Managed Objects for the
+                  Ethernet-like Interface Types";
+  }
+  identity isdn {
+    base iana-interface-type;
+    description
+      "ISDN and X.25.";
+    reference
+      "RFC 1356 - Multiprotocol Interconnect on X.25 and ISDN
+                  in the Packet Mode";
+  }
+  identity v11 {
+    base iana-interface-type;
+    description
+      "CCITT V.11/X.21.";
+  }
+  identity v36 {
+    base iana-interface-type;
+    description
+      "CCITT V.36.";
+  }
+  identity g703at64k {
+    base iana-interface-type;
+    description
+      "CCITT G703 at 64Kbps.";
+  }
+  identity g703at2mb {
+    base iana-interface-type;
+    status obsolete;
+    description
+      "Obsolete; see DS1-MIB.";
+  }
+  identity qllc {
+    base iana-interface-type;
+    description
+      "SNA QLLC.";
+  }
+  identity fastEtherFX {
+    base iana-interface-type;
+    status deprecated;
+    description
+      "Obsoleted via RFC 3635.
+      ethernetCsmacd(6) should be used instead.";
+    reference
+      "RFC 3635 - Definitions of Managed Objects for the
+                  Ethernet-like Interface Types";
+  }
+  identity channel {
+    base iana-interface-type;
+    description
+      "Channel.";
+  }
+  identity ieee80211 {
+    base iana-interface-type;
+    description
+      "Radio spread spectrum.";
+  }
+  identity ibm370parChan {
+    base iana-interface-type;
+    description
+      "IBM System 360/370 OEMI Channel.";
+  }
+  identity escon {
+    base iana-interface-type;
+    description
+      "IBM Enterprise Systems Connection.";
+  }
+  identity dlsw {
+    base iana-interface-type;
+    description
+      "Data Link Switching.";
+  }
+  identity isdns {
+    base iana-interface-type;
+    description
+      "ISDN S/T interface.";
+  }
+  identity isdnu {
+    base iana-interface-type;
+    description
+      "ISDN U interface.";
+  }
+  identity lapd {
+    base iana-interface-type;
+    description
+      "Link Access Protocol D.";
+  }
+  identity ipSwitch {
+    base iana-interface-type;
+    description
+      "IP Switching Objects.";
+  }
+  identity rsrb {
+    base iana-interface-type;
+    description
+      "Remote Source Route Bridging.";
+  }
+  identity atmLogical {
+    base iana-interface-type;
+    description
+      "ATM Logical Port.";
+    reference
+      "RFC 3606 - Definitions of Supplemental Managed Objects
+                  for ATM Interface";
+  }
+  identity ds0 {
+    base iana-interface-type;
+    description
+      "Digital Signal Level 0.";
+    reference
+      "RFC 2494 - Definitions of Managed Objects for the DS0
+                  and DS0 Bundle Interface Type";
+  }
+  identity ds0Bundle {
+    base iana-interface-type;
+    description
+      "Group of ds0s on the same ds1.";
+    reference
+      "RFC 2494 - Definitions of Managed Objects for the DS0
+                  and DS0 Bundle Interface Type";
+  }
+  identity bsc {
+    base iana-interface-type;
+    description
+      "Bisynchronous Protocol.";
+  }
+  identity async {
+    base iana-interface-type;
+    description
+      "Asynchronous Protocol.";
+  }
+  identity cnr {
+    base iana-interface-type;
+    description
+      "Combat Net Radio.";
+  }
+  identity iso88025Dtr {
+    base iana-interface-type;
+    description
+      "ISO 802.5r DTR.";
+  }
+  identity eplrs {
+    base iana-interface-type;
+    description
+      "Ext Pos Loc Report Sys.";
+  }
+  identity arap {
+    base iana-interface-type;
+    description
+      "Appletalk Remote Access Protocol.";
+  }
+  identity propCnls {
+    base iana-interface-type;
+    description
+      "Proprietary Connectionless Protocol.";
+  }
+  identity hostPad {
+    base iana-interface-type;
+    description
+      "CCITT-ITU X.29 PAD Protocol.";
+  }
+  identity termPad {
+    base iana-interface-type;
+    description
+      "CCITT-ITU X.3 PAD Facility.";
+  }
+  identity frameRelayMPI {
+    base iana-interface-type;
+    description
+      "Multiproto Interconnect over FR.";
+  }
+  identity x213 {
+    base iana-interface-type;
+    description
+      "CCITT-ITU X213.";
+  }
+  identity adsl {
+    base iana-interface-type;
+    description
+      "Asymmetric Digital Subscriber Loop.";
+  }
+  identity radsl {
+    base iana-interface-type;
+    description
+      "Rate-Adapt. Digital Subscriber Loop.";
+  }
+  identity sdsl {
+    base iana-interface-type;
+    description
+      "Symmetric Digital Subscriber Loop.";
+  }
+  identity vdsl {
+    base iana-interface-type;
+    description
+      "Very H-Speed Digital Subscrib. Loop.";
+  }
+  identity iso88025CRFPInt {
+    base iana-interface-type;
+    description
+      "ISO 802.5 CRFP.";
+  }
+  identity myrinet {
+    base iana-interface-type;
+    description
+      "Myricom Myrinet.";
+  }
+  identity voiceEM {
+    base iana-interface-type;
+    description
+      "Voice recEive and transMit.";
+  }
+  identity voiceFXO {
+    base iana-interface-type;
+    description
+      "Voice Foreign Exchange Office.";
+  }
+  identity voiceFXS {
+    base iana-interface-type;
+    description
+      "Voice Foreign Exchange Station.";
+  }
+  identity voiceEncap {
+    base iana-interface-type;
+    description
+      "Voice encapsulation.";
+  }
+  identity voiceOverIp {
+    base iana-interface-type;
+    description
+      "Voice over IP encapsulation.";
+  }
+  identity atmDxi {
+    base iana-interface-type;
+    description
+      "ATM DXI.";
+  }
+  identity atmFuni {
+    base iana-interface-type;
+    description
+      "ATM FUNI.";
+  }
+  identity atmIma {
+    base iana-interface-type;
+    description
+      "ATM IMA.";
+  }
+  identity pppMultilinkBundle {
+    base iana-interface-type;
+    description
+      "PPP Multilink Bundle.";
+  }
+  identity ipOverCdlc {
+    base iana-interface-type;
+    description
+      "IBM ipOverCdlc.";
+  }
+  identity ipOverClaw {
+    base iana-interface-type;
+    description
+      "IBM Common Link Access to Workstn.";
+  }
+  identity stackToStack {
+    base iana-interface-type;
+    description
+      "IBM stackToStack.";
+  }
+  identity virtualIpAddress {
+    base iana-interface-type;
+    description
+      "IBM VIPA.";
+  }
+  identity mpc {
+    base iana-interface-type;
+    description
+      "IBM multi-protocol channel support.";
+  }
+  identity ipOverAtm {
+    base iana-interface-type;
+    description
+      "IBM ipOverAtm.";
+    reference
+      "RFC 2320 - Definitions of Managed Objects for Classical IP
+                  and ARP Over ATM Using SMIv2 (IPOA-MIB)";
+  }
+  identity iso88025Fiber {
+    base iana-interface-type;
+    description
+      "ISO 802.5j Fiber Token Ring.";
+  }
+  identity tdlc {
+    base iana-interface-type;
+    description
+      "IBM twinaxial data link control.";
+  }
+  identity gigabitEthernet {
+    base iana-interface-type;
+    status deprecated;
+    description
+      "Obsoleted via RFC 3635.
+      ethernetCsmacd(6) should be used instead.";
+    reference
+      "RFC 3635 - Definitions of Managed Objects for the
+                  Ethernet-like Interface Types";
+  }
+  identity hdlc {
+    base iana-interface-type;
+    description
+      "HDLC.";
+  }
+  identity lapf {
+    base iana-interface-type;
+    description
+      "LAP F.";
+  }
+  identity v37 {
+    base iana-interface-type;
+    description
+      "V.37.";
+  }
+  identity x25mlp {
+    base iana-interface-type;
+    description
+      "Multi-Link Protocol.";
+  }
+  identity x25huntGroup {
+    base iana-interface-type;
+    description
+      "X25 Hunt Group.";
+  }
+  identity transpHdlc {
+    base iana-interface-type;
+    description
+      "Transp HDLC.";
+  }
+  identity interleave {
+    base iana-interface-type;
+    description
+      "Interleave channel.";
+  }
+  identity fast {
+    base iana-interface-type;
+    description
+      "Fast channel.";
+  }
+  identity ip {
+    base iana-interface-type;
+    description
+      "IP (for APPN HPR in IP networks).";
+  }
+  identity docsCableMaclayer {
+    base iana-interface-type;
+    description
+      "CATV Mac Layer.";
+  }
+  identity docsCableDownstream {
+    base iana-interface-type;
+    description
+      "CATV Downstream interface.";
+  }
+  identity docsCableUpstream {
+    base iana-interface-type;
+    description
+      "CATV Upstream interface.";
+  }
+  identity a12MppSwitch {
+    base iana-interface-type;
+    description
+      "Avalon Parallel Processor.";
+  }
+  identity tunnel {
+    base iana-interface-type;
+    description
+      "Encapsulation interface.";
+  }
+  identity coffee {
+    base iana-interface-type;
+    description
+      "Coffee pot.";
+    reference
+      "RFC 2325 - Coffee MIB";
+  }
+  identity ces {
+    base iana-interface-type;
+    description
+      "Circuit Emulation Service.";
+  }
+  identity atmSubInterface {
+    base iana-interface-type;
+    description
+      "ATM Sub Interface.";
+  }
+  identity l2vlan {
+    base iana-interface-type;
+    description
+      "Layer 2 Virtual LAN using 802.1Q.";
+  }
+  identity l3ipvlan {
+    base iana-interface-type;
+    description
+      "Layer 3 Virtual LAN using IP.";
+  }
+  identity l3ipxvlan {
+    base iana-interface-type;
+    description
+      "Layer 3 Virtual LAN using IPX.";
+  }
+  identity digitalPowerline {
+    base iana-interface-type;
+    description
+      "IP over Power Lines.";
+  }
+  identity mediaMailOverIp {
+    base iana-interface-type;
+    description
+      "Multimedia Mail over IP.";
+  }
+  identity dtm {
+    base iana-interface-type;
+    description
+      "Dynamic synchronous Transfer Mode.";
+  }
+  identity dcn {
+    base iana-interface-type;
+    description
+      "Data Communications Network.";
+  }
+  identity ipForward {
+    base iana-interface-type;
+    description
+      "IP Forwarding Interface.";
+  }
+  identity msdsl {
+    base iana-interface-type;
+    description
+      "Multi-rate Symmetric DSL.";
+  }
+  identity ieee1394 {
+    base iana-interface-type;
+    description
+      "IEEE1394 High Performance Serial Bus.";
+  }
+  identity if-gsn {
+    base iana-interface-type;
+    description
+      "HIPPI-6400.";
+  }
+  identity dvbRccMacLayer {
+    base iana-interface-type;
+    description
+      "DVB-RCC MAC Layer.";
+  }
+  identity dvbRccDownstream {
+    base iana-interface-type;
+    description
+      "DVB-RCC Downstream Channel.";
+  }
+  identity dvbRccUpstream {
+    base iana-interface-type;
+    description
+      "DVB-RCC Upstream Channel.";
+  }
+  identity atmVirtual {
+    base iana-interface-type;
+    description
+      "ATM Virtual Interface.";
+  }
+  identity mplsTunnel {
+    base iana-interface-type;
+    description
+      "MPLS Tunnel Virtual Interface.";
+  }
+  identity srp {
+    base iana-interface-type;
+    description
+      "Spatial Reuse Protocol.";
+  }
+  identity voiceOverAtm {
+    base iana-interface-type;
+    description
+      "Voice over ATM.";
+  }
+  identity voiceOverFrameRelay {
+    base iana-interface-type;
+    description
+      "Voice Over Frame Relay.";
+  }
+  identity idsl {
+    base iana-interface-type;
+    description
+      "Digital Subscriber Loop over ISDN.";
+  }
+  identity compositeLink {
+    base iana-interface-type;
+    description
+      "Avici Composite Link Interface.";
+  }
+  identity ss7SigLink {
+    base iana-interface-type;
+    description
+      "SS7 Signaling Link.";
+  }
+  identity propWirelessP2P {
+    base iana-interface-type;
+    description
+      "Prop. P2P wireless interface.";
+  }
+  identity frForward {
+    base iana-interface-type;
+    description
+      "Frame Forward Interface.";
+  }
+  identity rfc1483 {
+    base iana-interface-type;
+    description
+      "Multiprotocol over ATM AAL5.";
+    reference
+      "RFC 1483 - Multiprotocol Encapsulation over ATM
+                  Adaptation Layer 5";
+  }
+  identity usb {
+    base iana-interface-type;
+    description
+      "USB Interface.";
+  }
+  identity ieee8023adLag {
+    base iana-interface-type;
+    description
+      "IEEE 802.3ad Link Aggregate.";
+  }
+  identity bgppolicyaccounting {
+    base iana-interface-type;
+    description
+      "BGP Policy Accounting.";
+  }
+  identity frf16MfrBundle {
+    base iana-interface-type;
+    description
+      "FRF.16 Multilink Frame Relay.";
+  }
+  identity h323Gatekeeper {
+    base iana-interface-type;
+    description
+      "H323 Gatekeeper.";
+  }
+  identity h323Proxy {
+    base iana-interface-type;
+    description
+      "H323 Voice and Video Proxy.";
+  }
+  identity mpls {
+    base iana-interface-type;
+    description
+      "MPLS.";
+  }
+  identity mfSigLink {
+    base iana-interface-type;
+    description
+      "Multi-frequency signaling link.";
+  }
+  identity hdsl2 {
+    base iana-interface-type;
+    description
+      "High Bit-Rate DSL - 2nd generation.";
+  }
+  identity shdsl {
+    base iana-interface-type;
+    description
+      "Multirate HDSL2.";
+  }
+  identity ds1FDL {
+    base iana-interface-type;
+    description
+      "Facility Data Link (4Kbps) on a DS1.";
+  }
+  identity pos {
+    base iana-interface-type;
+    description
+      "Packet over SONET/SDH Interface.";
+  }
+  identity dvbAsiIn {
+    base iana-interface-type;
+    description
+      "DVB-ASI Input.";
+  }
+  identity dvbAsiOut {
+    base iana-interface-type;
+    description
+      "DVB-ASI Output.";
+  }
+  identity plc {
+    base iana-interface-type;
+    description
+      "Power Line Communications.";
+  }
+  identity nfas {
+    base iana-interface-type;
+    description
+      "Non-Facility Associated Signaling.";
+  }
+  identity tr008 {
+    base iana-interface-type;
+    description
+      "TR008.";
+  }
+  identity gr303RDT {
+    base iana-interface-type;
+    description
+      "Remote Digital Terminal.";
+  }
+  identity gr303IDT {
+    base iana-interface-type;
+    description
+      "Integrated Digital Terminal.";
+  }
+  identity isup {
+    base iana-interface-type;
+    description
+      "ISUP.";
+  }
+  identity propDocsWirelessMaclayer {
+    base iana-interface-type;
+    description
+      "Cisco proprietary Maclayer.";
+  }
+  identity propDocsWirelessDownstream {
+    base iana-interface-type;
+    description
+      "Cisco proprietary Downstream.";
+  }
+  identity propDocsWirelessUpstream {
+    base iana-interface-type;
+    description
+      "Cisco proprietary Upstream.";
+  }
+  identity hiperlan2 {
+    base iana-interface-type;
+    description
+      "HIPERLAN Type 2 Radio Interface.";
+  }
+  identity propBWAp2Mp {
+    base iana-interface-type;
+    description
+      "PropBroadbandWirelessAccesspt2Multipt (use of this value
+      for IEEE 802.16 WMAN interfaces as per IEEE Std 802.16f
+      is deprecated, and ieee80216WMAN(237) should be used
+      instead).";
+  }
+  identity sonetOverheadChannel {
+    base iana-interface-type;
+    description
+      "SONET Overhead Channel.";
+  }
+  identity digitalWrapperOverheadChannel {
+    base iana-interface-type;
+    description
+      "Digital Wrapper.";
+  }
+  identity aal2 {
+    base iana-interface-type;
+    description
+      "ATM adaptation layer 2.";
+  }
+  identity radioMAC {
+    base iana-interface-type;
+    description
+      "MAC layer over radio links.";
+  }
+  identity atmRadio {
+    base iana-interface-type;
+    description
+      "ATM over radio links.";
+  }
+  identity imt {
+    base iana-interface-type;
+    description
+      "Inter-Machine Trunks.";
+  }
+  identity mvl {
+    base iana-interface-type;
+    description
+      "Multiple Virtual Lines DSL.";
+  }
+  identity reachDSL {
+    base iana-interface-type;
+    description
+      "Long Reach DSL.";
+  }
+  identity frDlciEndPt {
+    base iana-interface-type;
+    description
+      "Frame Relay DLCI End Point.";
+  }
+  identity atmVciEndPt {
+    base iana-interface-type;
+    description
+      "ATM VCI End Point.";
+  }
+  identity opticalChannel {
+    base iana-interface-type;
+    description
+      "Optical Channel.";
+  }
+  identity opticalTransport {
+    base iana-interface-type;
+    description
+      "Optical Transport.";
+  }
+  identity propAtm {
+    base iana-interface-type;
+    description
+      "Proprietary ATM.";
+  }
+  identity voiceOverCable {
+    base iana-interface-type;
+    description
+      "Voice Over Cable Interface.";
+  }
+  identity infiniband {
+    base iana-interface-type;
+    description
+      "Infiniband.";
+  }
+  identity teLink {
+    base iana-interface-type;
+    description
+      "TE Link.";
+  }
+  identity q2931 {
+    base iana-interface-type;
+    description
+      "Q.2931.";
+  }
+  identity virtualTg {
+    base iana-interface-type;
+    description
+      "Virtual Trunk Group.";
+  }
+  identity sipTg {
+    base iana-interface-type;
+    description
+      "SIP Trunk Group.";
+  }
+  identity sipSig {
+    base iana-interface-type;
+    description
+      "SIP Signaling.";
+  }
+  identity docsCableUpstreamChannel {
+    base iana-interface-type;
+    description
+      "CATV Upstream Channel.";
+  }
+  identity econet {
+    base iana-interface-type;
+    description
+      "Acorn Econet.";
+  }
+  identity pon155 {
+    base iana-interface-type;
+    description
+      "FSAN 155Mb Symetrical PON interface.";
+  }
+  identity pon622 {
+    base iana-interface-type;
+    description
+      "FSAN 622Mb Symetrical PON interface.";
+  }
+  identity bridge {
+    base iana-interface-type;
+    description
+      "Transparent bridge interface.";
+  }
+  identity linegroup {
+    base iana-interface-type;
+    description
+      "Interface common to multiple lines.";
+  }
+  identity voiceEMFGD {
+    base iana-interface-type;
+    description
+      "Voice E&M Feature Group D.";
+  }
+  identity voiceFGDEANA {
+    base iana-interface-type;
+    description
+      "Voice FGD Exchange Access North American.";
+  }
+  identity voiceDID {
+    base iana-interface-type;
+    description
+      "Voice Direct Inward Dialing.";
+  }
+  identity mpegTransport {
+    base iana-interface-type;
+    description
+      "MPEG transport interface.";
+  }
+  identity sixToFour {
+    base iana-interface-type;
+    status deprecated;
+    description
+      "6to4 interface (DEPRECATED).";
+    reference
+      "RFC 4087 - IP Tunnel MIB";
+  }
+  identity gtp {
+    base iana-interface-type;
+    description
+      "GTP (GPRS Tunneling Protocol).";
+  }
+  identity pdnEtherLoop1 {
+    base iana-interface-type;
+    description
+      "Paradyne EtherLoop 1.";
+  }
+  identity pdnEtherLoop2 {
+    base iana-interface-type;
+    description
+      "Paradyne EtherLoop 2.";
+  }
+  identity opticalChannelGroup {
+    base iana-interface-type;
+    description
+      "Optical Channel Group.";
+  }
+  identity homepna {
+    base iana-interface-type;
+    description
+      "HomePNA ITU-T G.989.";
+  }
+  identity gfp {
+    base iana-interface-type;
+    description
+      "Generic Framing Procedure (GFP).";
+  }
+  identity ciscoISLvlan {
+    base iana-interface-type;
+    description
+      "Layer 2 Virtual LAN using Cisco ISL.";
+  }
+  identity actelisMetaLOOP {
+    base iana-interface-type;
+    description
+      "Acteleis proprietary MetaLOOP High Speed Link.";
+  }
+  identity fcipLink {
+    base iana-interface-type;
+    description
+      "FCIP Link.";
+  }
+  identity rpr {
+    base iana-interface-type;
+    description
+      "Resilient Packet Ring Interface Type.";
+  }
+  identity qam {
+    base iana-interface-type;
+    description
+      "RF Qam Interface.";
+  }
+  identity lmp {
+    base iana-interface-type;
+    description
+      "Link Management Protocol.";
+    reference
+      "RFC 4327 - Link Management Protocol (LMP) Management
+                  Information Base (MIB)";
+  }
+  identity cblVectaStar {
+    base iana-interface-type;
+    description
+      "Cambridge Broadband Networks Limited VectaStar.";
+  }
+  identity docsCableMCmtsDownstream {
+    base iana-interface-type;
+    description
+      "CATV Modular CMTS Downstream Interface.";
+  }
+  identity adsl2 {
+    base iana-interface-type;
+    status deprecated;
+    description
+      "Asymmetric Digital Subscriber Loop Version 2
+      (DEPRECATED/OBSOLETED - please use adsl2plus(238)
+      instead).";
+    reference
+      "RFC 4706 - Definitions of Managed Objects for Asymmetric
+                  Digital Subscriber Line 2 (ADSL2)";
+  }
+  identity macSecControlledIF {
+    base iana-interface-type;
+    description
+      "MACSecControlled.";
+  }
+  identity macSecUncontrolledIF {
+    base iana-interface-type;
+    description
+      "MACSecUncontrolled.";
+  }
+  identity aviciOpticalEther {
+    base iana-interface-type;
+    description
+      "Avici Optical Ethernet Aggregate.";
+  }
+  identity atmbond {
+    base iana-interface-type;
+    description
+      "atmbond.";
+  }
+  identity voiceFGDOS {
+    base iana-interface-type;
+    description
+      "Voice FGD Operator Services.";
+  }
+  identity mocaVersion1 {
+    base iana-interface-type;
+    description
+      "MultiMedia over Coax Alliance (MoCA) Interface
+      as documented in information provided privately to IANA.";
+  }
+  identity ieee80216WMAN {
+    base iana-interface-type;
+    description
+      "IEEE 802.16 WMAN interface.";
+  }
+  identity adsl2plus {
+    base iana-interface-type;
+    description
+      "Asymmetric Digital Subscriber Loop Version 2 -
+      Version 2 Plus and all variants.";
+  }
+  identity dvbRcsMacLayer {
+    base iana-interface-type;
+    description
+      "DVB-RCS MAC Layer.";
+    reference
+      "RFC 5728 - The SatLabs Group DVB-RCS MIB";
+  }
+  identity dvbTdm {
+    base iana-interface-type;
+    description
+      "DVB Satellite TDM.";
+    reference
+      "RFC 5728 - The SatLabs Group DVB-RCS MIB";
+  }
+  identity dvbRcsTdma {
+    base iana-interface-type;
+    description
+      "DVB-RCS TDMA.";
+    reference
+      "RFC 5728 - The SatLabs Group DVB-RCS MIB";
+  }
+  identity x86Laps {
+    base iana-interface-type;
+    description
+      "LAPS based on ITU-T X.86/Y.1323.";
+  }
+  identity wwanPP {
+    base iana-interface-type;
+    description
+      "3GPP WWAN.";
+  }
+  identity wwanPP2 {
+    base iana-interface-type;
+    description
+      "3GPP2 WWAN.";
+  }
+  identity voiceEBS {
+    base iana-interface-type;
+    description
+      "Voice P-phone EBS physical interface.";
+  }
+  identity ifPwType {
+    base iana-interface-type;
+    description
+      "Pseudowire interface type.";
+    reference
+      "RFC 5601 - Pseudowire (PW) Management Information Base (MIB)";
+  }
+  identity ilan {
+    base iana-interface-type;
+    description
+      "Internal LAN on a bridge per IEEE 802.1ap.";
+  }
+  identity pip {
+    base iana-interface-type;
+    description
+      "Provider Instance Port on a bridge per IEEE 802.1ah PBB.";
+  }
+  identity aluELP {
+    base iana-interface-type;
+    description
+      "Alcatel-Lucent Ethernet Link Protection.";
+  }
+  identity gpon {
+    base iana-interface-type;
+    description
+      "Gigabit-capable passive optical networks (G-PON) as per
+      ITU-T G.984.";
+  }
+  identity vdsl2 {
+    base iana-interface-type;
+    description
+      "Very high speed digital subscriber line Version 2
+      (as per ITU-T Recommendation G.993.2).";
+    reference
+      "RFC 5650 - Definitions of Managed Objects for Very High
+                  Speed Digital Subscriber Line 2 (VDSL2)";
+  }
+  identity capwapDot11Profile {
+    base iana-interface-type;
+    description
+      "WLAN Profile Interface.";
+    reference
+      "RFC 5834 - Control and Provisioning of Wireless Access
+                  Points (CAPWAP) Protocol Binding MIB for
+                  IEEE 802.11";
+  }
+  identity capwapDot11Bss {
+    base iana-interface-type;
+    description
+      "WLAN BSS Interface.";
+    reference
+      "RFC 5834 - Control and Provisioning of Wireless Access
+                  Points (CAPWAP) Protocol Binding MIB for
+                  IEEE 802.11";
+  }
+  identity capwapWtpVirtualRadio {
+    base iana-interface-type;
+    description
+      "WTP Virtual Radio Interface.";
+    reference
+      "RFC 5833 - Control and Provisioning of Wireless Access
+                  Points (CAPWAP) Protocol Base MIB";
+  }
+  identity bits {
+    base iana-interface-type;
+    description
+      "bitsport.";
+  }
+  identity docsCableUpstreamRfPort {
+    base iana-interface-type;
+    description
+      "DOCSIS CATV Upstream RF Port.";
+  }
+  identity cableDownstreamRfPort {
+    base iana-interface-type;
+    description
+      "CATV downstream RF Port.";
+  }
+  identity vmwareVirtualNic {
+    base iana-interface-type;
+    description
+      "VMware Virtual Network Interface.";
+  }
+  identity ieee802154 {
+    base iana-interface-type;
+    description
+      "IEEE 802.15.4 WPAN interface.";
+    reference
+      "IEEE 802.15.4-2006";
+  }
+  identity otnOdu {
+    base iana-interface-type;
+    description
+      "OTN Optical Data Unit.";
+  }
+  identity otnOtu {
+    base iana-interface-type;
+    description
+      "OTN Optical channel Transport Unit.";
+  }
+  identity ifVfiType {
+    base iana-interface-type;
+    description
+      "VPLS Forwarding Instance Interface Type.";
+  }
+  identity g9981 {
+    base iana-interface-type;
+    description
+      "G.998.1 bonded interface.";
+  }
+  identity g9982 {
+    base iana-interface-type;
+    description
+      "G.998.2 bonded interface.";
+  }
+  identity g9983 {
+    base iana-interface-type;
+    description
+      "G.998.3 bonded interface.";
+  }
+  identity aluEpon {
+    base iana-interface-type;
+    description
+      "Ethernet Passive Optical Networks (E-PON).";
+  }
+  identity aluEponOnu {
+    base iana-interface-type;
+    description
+      "EPON Optical Network Unit.";
+  }
+  identity aluEponPhysicalUni {
+    base iana-interface-type;
+    description
+      "EPON physical User to Network interface.";
+  }
+  identity aluEponLogicalLink {
+    base iana-interface-type;
+    description
+      "The emulation of a point-to-point link over the EPON
+      layer.";
+  }
+  identity aluGponOnu {
+    base iana-interface-type;
+    description
+      "GPON Optical Network Unit.";
+    reference
+      "ITU-T G.984.2";
+  }
+  identity aluGponPhysicalUni {
+    base iana-interface-type;
+    description
+      "GPON physical User to Network interface.";
+    reference
+      "ITU-T G.984.2";
+  }
+  identity vmwareNicTeam {
+    base iana-interface-type;
+    description
+      "VMware NIC Team.";
+  }
+}
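
Note: the identities in this module mirror IANA's ifType registry. As a
minimal, illustrative sketch (all names assumed, not part of this change
set), a client would typically consume them through the "type" leaf of the
RFC 8343 ietf-interfaces model, which is an identityref based on
iana-interface-type:

    <interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces"
                xmlns:ianaift="urn:ietf:params:xml:ns:yang:iana-if-type">
      <interface>
        <name>eth0</name>
        <!-- any identity derived from iana-interface-type is valid,
             e.g. ethernetCsmacd for Ethernet-like interfaces -->
        <type>ianaift:ethernetCsmacd</type>
      </interface>
    </interfaces>
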
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-access-control-list@2019-03-04.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-access-control-list@2019-03-04.yang
new file mode 100644
index 0000000000000000000000000000000000000000..00ae58ee6a63d385c583231f0b84bcdd1bdc41bf
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-access-control-list@2019-03-04.yang
@@ -0,0 +1,674 @@
+module ietf-access-control-list {
+  yang-version 1.1;
+  namespace "urn:ietf:params:xml:ns:yang:ietf-access-control-list";
+  prefix acl;
+
+  import ietf-yang-types {
+    prefix yang;
+    reference
+      "RFC 6991 - Common YANG Data Types.";
+  }
+
+  import ietf-packet-fields {
+    prefix pf;
+    reference
+      "RFC 8519 - YANG Data Model for Network Access Control
+                  Lists (ACLs).";
+  }
+
+  import ietf-interfaces {
+    prefix if;
+    reference
+      "RFC 8343 - A YANG Data Model for Interface Management.";
+  }
+
+  organization
+    "IETF NETMOD (Network Modeling) Working Group.";
+
+  contact
+    "WG Web:  <https://datatracker.ietf.org/wg/netmod/>
+     WG List: netmod@ietf.org
+
+     Editor: Mahesh Jethanandani
+             mjethanandani@gmail.com
+     Editor: Lisa Huang
+             huangyi_99@yahoo.com
+     Editor: Sonal Agarwal
+             sagarwal12@gmail.com
+     Editor: Dana Blair
+             dana@blairhome.com";
+
+  description
+    "This YANG module defines a component that describes the
+     configuration and monitoring of Access Control Lists (ACLs).
+
+     The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL',
+     'SHALL NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED',
+     'NOT RECOMMENDED', 'MAY', and 'OPTIONAL' in this document
+     are to be interpreted as described in BCP 14 (RFC 2119)
+     (RFC 8174) when, and only when, they appear in all
+     capitals, as shown here.
+
+     Copyright (c) 2019 IETF Trust and the persons identified as
+     the document authors.  All rights reserved.
+
+     Redistribution and use in source and binary forms, with or
+     without modification, is permitted pursuant to, and subject
+     to the license terms contained in, the Simplified BSD
+     License set forth in Section 4.c of the IETF Trust's Legal
+     Provisions Relating to IETF Documents
+     (http://trustee.ietf.org/license-info).
+
+     This version of this YANG module is part of RFC 8519; see
+     the RFC itself for full legal notices.";
+
+  revision 2019-03-04 {
+    description
+      "Initial version.";
+    reference
+      "RFC 8519: YANG Data Model for Network Access Control
+                 Lists (ACLs).";
+  }
+
+  /*
+   * Identities
+   */
+  /*
+   * Forwarding actions for a packet
+   */
+
+  identity forwarding-action {
+    description
+      "Base identity for actions in the forwarding category.";
+  }
+
+  identity accept {
+    base forwarding-action;
+    description
+      "Accept the packet.";
+  }
+
+  identity drop {
+    base forwarding-action;
+    description
+      "Drop packet without sending any ICMP error message.";
+  }
+
+  identity reject {
+    base forwarding-action;
+    description
+      "Drop the packet and send an ICMP error message to the source.";
+  }
+
+  /*
+   * Logging actions for a packet
+   */
+
+  identity log-action {
+    description
+      "Base identity for defining the destination for logging
+       actions.";
+  }
+
+  identity log-syslog {
+    base log-action;
+    description
+      "System log (syslog) the information for the packet.";
+  }
+
+  identity log-none {
+    base log-action;
+    description
+      "No logging for the packet.";
+  }
+
+  /*
+   * ACL type identities
+   */
+
+  identity acl-base {
+    description
+      "Base Access Control List type for all Access Control List type
+       identifiers.";
+  }
+
+  identity ipv4-acl-type {
+    base acl:acl-base;
+    if-feature "ipv4";
+    description
+      "An ACL that matches on fields from the IPv4 header
+       (e.g., IPv4 destination address) and Layer 4 headers (e.g., TCP
+       destination port).  An ACL of type ipv4 does not contain
+       matches on fields in the Ethernet header or the IPv6 header.";
+  }
+
+  identity ipv6-acl-type {
+    base acl:acl-base;
+    if-feature "ipv6";
+    description
+      "An ACL that matches on fields from the IPv6 header
+       (e.g., IPv6 destination address) and Layer 4 headers (e.g., TCP
+       destination port).  An ACL of type ipv6 does not contain
+       matches on fields in the Ethernet header or the IPv4 header.";
+  }
+
+  identity eth-acl-type {
+    base acl:acl-base;
+    if-feature "eth";
+    description
+      "An ACL that matches on fields in the Ethernet header,
+       like 10/100/1000baseT or a Wi-Fi Access Control List.  An ACL
+       of type ethernet does not contain matches on fields in the
+       IPv4 header, the IPv6 header, or Layer 4 headers.";
+  }
+
+  identity mixed-eth-ipv4-acl-type {
+    base acl:eth-acl-type;
+    base acl:ipv4-acl-type;
+    if-feature "mixed-eth-ipv4";
+    description
+      "An ACL that contains a mix of entries that match
+       on fields in Ethernet headers and in IPv4 headers.
+       Matching on Layer 4 header fields may also exist in the
+       list.";
+  }
+
+  identity mixed-eth-ipv6-acl-type {
+    base acl:eth-acl-type;
+    base acl:ipv6-acl-type;
+    if-feature "mixed-eth-ipv6";
+    description
+      "An ACL that contains a mix of entries that match on fields
+       in Ethernet headers and in IPv6 headers.  Matching
+       on Layer 4 header fields may also exist in the list.";
+  }
+
+  identity mixed-eth-ipv4-ipv6-acl-type {
+    base acl:eth-acl-type;
+    base acl:ipv4-acl-type;
+    base acl:ipv6-acl-type;
+    if-feature "mixed-eth-ipv4-ipv6";
+    description
+      "An ACL that contains a mix of entries that
+       match on fields in Ethernet headers, IPv4 headers, and IPv6
+       headers.  Matching on Layer 4 header fields may also exist
+       in the list.";
+  }
+
+  /*
+   * Features
+   */
+
+  /*
+   * Features supported by device
+   */
+  feature match-on-eth {
+    description
+      "The device can support matching on Ethernet headers.";
+  }
+
+  feature match-on-ipv4 {
+    description
+      "The device can support matching on IPv4 headers.";
+  }
+
+  feature match-on-ipv6 {
+    description
+      "The device can support matching on IPv6 headers.";
+  }
+
+  feature match-on-tcp {
+    description
+      "The device can support matching on TCP headers.";
+  }
+
+  feature match-on-udp {
+    description
+      "The device can support matching on UDP headers.";
+  }
+
+  feature match-on-icmp {
+    description
+      "The device can support matching on ICMP (v4 and v6) headers.";
+  }
+
+  /*
+   * Header classifications combinations supported by
+   * device
+   */
+
+  feature eth {
+    if-feature "match-on-eth";
+    description
+      "Plain Ethernet ACL supported.";
+  }
+
+  feature ipv4 {
+    if-feature "match-on-ipv4";
+    description
+      "Plain IPv4 ACL supported.";
+  }
+
+  feature ipv6 {
+    if-feature "match-on-ipv6";
+    description
+      "Plain IPv6 ACL supported.";
+  }
+
+  feature mixed-eth-ipv4 {
+    if-feature "match-on-eth and match-on-ipv4";
+    description
+      "Ethernet and IPv4 ACL combinations supported.";
+  }
+
+  feature mixed-eth-ipv6 {
+    if-feature "match-on-eth and match-on-ipv6";
+    description
+      "Ethernet and IPv6 ACL combinations supported.";
+  }
+
+  feature mixed-eth-ipv4-ipv6 {
+    if-feature
+      "match-on-eth and match-on-ipv4
+       and match-on-ipv6";
+    description
+      "Ethernet, IPv4, and IPv6 ACL combinations supported.";
+  }
+
+  /*
+   * Stats Features
+   */
+  feature interface-stats {
+    description
+      "ACL counters are available and reported only per interface.";
+  }
+
+  feature acl-aggregate-stats {
+    description
+      "ACL counters are aggregated over all interfaces and reported
+       only per ACL entry.";
+  }
+
+  /*
+   * Attachment point features
+   */
+  feature interface-attachment {
+    description
+      "ACLs are set on interfaces.";
+  }
+
+  /*
+   * Typedefs
+   */
+  typedef acl-type {
+    type identityref {
+      base acl-base;
+    }
+    description
+      "This type is used to refer to an ACL type.";
+  }
+
+  /*
+   * Groupings
+   */
+  grouping acl-counters {
+    description
+      "Common grouping for ACL counters.";
+    leaf matched-packets {
+      type yang:counter64;
+      config false;
+      description
+        "Count of the number of packets matching the current ACL
+         entry.
+
+         An implementation should provide this counter on a
+         per-interface, per-ACL-entry basis if possible.
+
+         If an implementation only supports ACL counters on a per-
+         entry basis (i.e., not broken out per interface), then the
+         value should be equal to the aggregate count across all
+         interfaces.
+
+         An implementation that provides counters on a per-entry, per-
+         interface basis is not required to also provide an aggregate
+         count, e.g., per entry -- the user is expected to be able to
+         implement the required aggregation if such a count is
+         needed.";
+    }
+
+    leaf matched-octets {
+      type yang:counter64;
+      config false;
+      description
+        "Count of the number of octets (bytes) matching the current
+         ACL entry.
+
+         An implementation should provide this counter on a
+         per-interface, per-ACL-entry basis if possible.
+
+         If an implementation only supports ACL counters per entry
+         (i.e., not broken out per interface), then the value
+         should be equal to the aggregate count across all interfaces.
+
+         An implementation that provides counters per entry per
+         interface is not required to also provide an aggregate count,
+         e.g., per entry -- the user is expected to be able to
+         implement the required aggregation if such a count is needed.";
+    }
+  }
+
+  /*
+   * Configuration and monitoring data nodes
+   */
+
+  container acls {
+    description
+      "This is a top-level container for Access Control Lists.
+       It can have one or more acl nodes.";
+    list acl {
+      key "name";
+      description
+        "An ACL is an ordered list of ACEs.  Each ACE has a
+         list of match criteria and a list of actions.
+         Since there are several kinds of ACLs implemented
+         with different attributes for different vendors,
+         this model accommodates customizing ACLs for
+         each kind and for each vendor.";
+      leaf name {
+        type string {
+          length "1..64";
+        }
+        description
+          "The name of the access list.  A device MAY further
+           restrict the length of this name; space and special
+           characters are not allowed.";
+      }
+      leaf type {
+        type acl-type;
+        description
+          "Type of ACL.  Indicates the primary intended
+           type of match criteria (e.g., Ethernet, IPv4, IPv6, mixed,
+           etc.) used in the list instance.";
+      }
+      container aces {
+        description
+          "The aces container contains one or more ACE nodes.";
+        list ace {
+          key "name";
+          ordered-by user;
+          description
+            "List of ACEs.";
+          leaf name {
+            type string {
+              length "1..64";
+            }
+            description
+              "A unique name identifying this ACE.";
+          }
+
+          container matches {
+            description
+              "The rules in this set determine what fields will be
+               matched upon before any action is taken on them.
+               The rules are selected based on the feature set
+               defined by the server and the acl-type defined.
+               If no matches are defined in a particular container,
+               then any packet will match that container.  If no
+               matches are specified at all in an ACE, then any
+               packet will match the ACE.";
+
+            choice l2 {
+              container eth {
+                when "derived-from-or-self(/acls/acl/type, "
+                   + "'acl:eth-acl-type')";
+                if-feature "match-on-eth";
+                uses pf:acl-eth-header-fields;
+                description
+                  "Rule set that matches Ethernet headers.";
+              }
+              description
+                "Match Layer 2 headers, for example, Ethernet
+                 header fields.";
+            }
+
+            choice l3 {
+              container ipv4 {
+                when "derived-from-or-self(/acls/acl/type, "
+                   + "'acl:ipv4-acl-type')";
+                if-feature "match-on-ipv4";
+                uses pf:acl-ip-header-fields;
+                uses pf:acl-ipv4-header-fields;
+                description
+                  "Rule set that matches IPv4 headers.";
+              }
+
+              container ipv6 {
+                when "derived-from-or-self(/acls/acl/type, "
+                   + "'acl:ipv6-acl-type')";
+                if-feature "match-on-ipv6";
+                uses pf:acl-ip-header-fields;
+                uses pf:acl-ipv6-header-fields;
+                description
+                  "Rule set that matches IPv6 headers.";
+              }
+              description
+                "Choice of either IPv4 or IPv6 headers";
+            }
+
+            choice l4 {
+              container tcp {
+                if-feature "match-on-tcp";
+                uses pf:acl-tcp-header-fields;
+                container source-port {
+                  choice source-port {
+                    case range-or-operator {
+                      uses pf:port-range-or-operator;
+                      description
+                        "Source port definition from range or
+                         operator.";
+                    }
+                    description
+                      "Choice of source port definition using
+                       range/operator or a choice to support future
+                       'case' statements, such as one enabling a
+                       group of source ports to be referenced.";
+                  }
+                  description
+                    "Source port definition.";
+                }
+                container destination-port {
+                  choice destination-port {
+                    case range-or-operator {
+                      uses pf:port-range-or-operator;
+                      description
+                        "Destination port definition from range or
+                         operator.";
+                    }
+                    description
+                      "Choice of destination port definition using
+                       range/operator or a choice to support future
+                       'case' statements, such as one enabling a
+                       group of destination ports to be referenced.";
+                  }
+                  description
+                    "Destination port definition.";
+                }
+                description
+                  "Rule set that matches TCP headers.";
+              }
+
+              container udp {
+                if-feature "match-on-udp";
+                uses pf:acl-udp-header-fields;
+                container source-port {
+                  choice source-port {
+                    case range-or-operator {
+                      uses pf:port-range-or-operator;
+                      description
+                        "Source port definition from range or
+                         operator.";
+                    }
+                    description
+                      "Choice of source port definition using
+                       range/operator or a choice to support future
+                       'case' statements, such as one enabling a
+                       group of source ports to be referenced.";
+                  }
+                  description
+                    "Source port definition.";
+                }
+                container destination-port {
+                  choice destination-port {
+                    case range-or-operator {
+                      uses pf:port-range-or-operator;
+                      description
+                        "Destination port definition from range or
+                         operator.";
+                    }
+                    description
+                      "Choice of destination port definition using
+                       range/operator or a choice to support future
+                       'case' statements, such as one enabling a
+                       group of destination ports to be referenced.";
+                  }
+                  description
+                    "Destination port definition.";
+                }
+                description
+                  "Rule set that matches UDP headers.";
+              }
+
+              container icmp {
+                if-feature "match-on-icmp";
+                uses pf:acl-icmp-header-fields;
+                description
+                  "Rule set that matches ICMP headers.";
+              }
+              description
+                "Choice of TCP, UDP, or ICMP headers.";
+            }
+
+            leaf egress-interface {
+              type if:interface-ref;
+              description
+                "Egress interface.  This should not be used if this ACL
+                 is attached as an egress ACL (or the value should
+                 equal the interface to which the ACL is attached).";
+            }
+
+            leaf ingress-interface {
+              type if:interface-ref;
+              description
+                "Ingress interface.  This should not be used if this ACL
+                 is attached as an ingress ACL (or the value should
+                 equal the interface to which the ACL is attached).";
+            }
+          }
+
+          container actions {
+            description
+              "Definition of actions for this ace entry.";
+            leaf forwarding {
+              type identityref {
+                base forwarding-action;
+              }
+              mandatory true;
+              description
+                "Specifies the forwarding action per ace entry.";
+            }
+
+            leaf logging {
+              type identityref {
+                base log-action;
+              }
+              default "log-none";
+              description
+                "Specifies the log action and destination for
+                 matched packets.  Default value is not to log the
+                 packet.";
+            }
+          }
+          container statistics {
+            if-feature "acl-aggregate-stats";
+            config false;
+            description
+              "Statistics gathered across all attachment points for the
+               given ACL.";
+            uses acl-counters;
+          }
+        }
+      }
+    }
+
+    container attachment-points {
+      description
+        "Enclosing container for the list of
+         attachment points on which ACLs are set.";
+      /*
+       * Groupings
+       */
+      grouping interface-acl {
+        description
+          "Grouping for per-interface ingress ACL data.";
+        container acl-sets {
+          description
+            "Enclosing container for the list of ingress ACLs on the
+             interface.";
+          list acl-set {
+            key "name";
+            ordered-by user;
+            description
+              "List of ingress ACLs on the interface.";
+            leaf name {
+              type leafref {
+                path "/acls/acl/name";
+              }
+              description
+                "Reference to the ACL name applied on the ingress.";
+            }
+            list ace-statistics {
+              if-feature "interface-stats";
+              key "name";
+              config false;
+              description
+                "List of ACEs.";
+              leaf name {
+                type leafref {
+                  path "/acls/acl/aces/ace/name";
+                }
+                description
+                  "Name of the ace entry.";
+              }
+              uses acl-counters;
+            }
+          }
+        }
+      }
+
+      list interface {
+        if-feature "interface-attachment";
+        key "interface-id";
+        description
+          "List of interfaces on which ACLs are set.";
+
+        leaf interface-id {
+          type if:interface-ref;
+          description
+            "Reference to the interface id list key.";
+        }
+
+        container ingress {
+          uses interface-acl;
+          description
+            "The ACLs applied to the ingress interface.";
+        }
+        container egress {
+          uses interface-acl;
+          description
+            "The ACLs applied to the egress interface.";
+        }
+      }
+    }
+  }
+}
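
For orientation, a minimal instance sketch of the model defined above (ACL
name, ACE name, and addresses are illustrative assumptions): one IPv4 ACL
whose single ACE accepts TCP traffic to port 22 inside 192.0.2.0/24. The
match leaves come from the ietf-packet-fields groupings this module imports:

    <acls xmlns="urn:ietf:params:xml:ns:yang:ietf-access-control-list">
      <acl>
        <name>sample-ipv4-acl</name>
        <type>ipv4-acl-type</type>
        <aces>
          <ace>
            <name>permit-ssh</name>
            <matches>
              <!-- "l3" choice: IPv4 header fields -->
              <ipv4>
                <destination-ipv4-network>192.0.2.0/24</destination-ipv4-network>
              </ipv4>
              <!-- "l4" choice: TCP destination port, range-or-operator case -->
              <tcp>
                <destination-port>
                  <operator>eq</operator>
                  <port>22</port>
                </destination-port>
              </tcp>
            </matches>
            <actions>
              <forwarding>accept</forwarding>
            </actions>
          </ace>
        </aces>
      </acl>
    </acls>

Binding the ACL to an interface is a separate step under
/acls/attachment-points/interface, using the ingress or egress containers
shown above.
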
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-ethertypes@2019-03-04.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-ethertypes@2019-03-04.yang
new file mode 100644
index 0000000000000000000000000000000000000000..115c05ce0644ccfab07a96f6b8e5bc31b954a5f6
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-ethertypes@2019-03-04.yang
@@ -0,0 +1,381 @@
+module ietf-ethertypes {
+    namespace "urn:ietf:params:xml:ns:yang:ietf-ethertypes";
+    prefix ethertypes;
+
+    organization
+    "IETF NETMOD (Network Modeling) Working Group.";
+
+    contact
+    "WG Web:   <https://datatracker.ietf.org/wg/netmod/>
+    WG List:  <mailto:netmod@ietf.org>
+
+    Editor:   Mahesh Jethanandani
+                <mjethanandani@gmail.com>";
+
+    description
+    "This module contains common definitions for the
+    Ethertype used by different modules.  It is a
+    placeholder module, until such time as IEEE
+    starts a project to define these Ethertypes
+    and publishes a standard.
+
+    At that time, this module can be deprecated.
+
+    Copyright (c) 2019 IETF Trust and the persons identified as
+    the document authors.  All rights reserved.
+
+    Redistribution and use in source and binary forms, with or
+    without modification, is permitted pursuant to, and subject
+    to the license terms contained in, the Simplified BSD
+    License set forth in Section 4.c of the IETF Trust's Legal
+    Provisions Relating to IETF Documents
+    (http://trustee.ietf.org/license-info).
+
+    This version of this YANG module is part of RFC 8519; see
+    the RFC itself for full legal notices.";
+
+    revision 2019-03-04 {
+    description
+        "Initial revision.";
+    reference
+        "RFC 8519: YANG Data Model for Network Access Control
+                Lists (ACLs).";
+    }
+
+    typedef ethertype {
+    type union {
+        type uint16;
+        type enumeration {
+        enum ipv4 {
+            value 2048;
+            description
+            "Internet Protocol version 4 (IPv4) with a
+            hex value of 0x0800.";
+            reference
+            "RFC 791: Internet Protocol.";
+        }
+        enum arp {
+            value 2054;
+            description
+            "Address Resolution Protocol (ARP) with a
+            hex value of 0x0806.";
+            reference
+            "RFC 826: An Ethernet Address Resolution Protocol: Or
+                        Converting Network Protocol Addresses to 48.bit
+                        Ethernet Address for Transmission on Ethernet
+                        Hardware.";
+        }
+        enum wlan {
+            value 2114;
+            description
+            "Wake-on-LAN.  Hex value of 0x0842.";
+        }
+        enum trill {
+            value 8947;
+            description
+            "Transparent Interconnection of Lots of Links.
+            Hex value of 0x22F3.";
+            reference
+            "RFC 6325: Routing Bridges (RBridges): Base Protocol
+                        Specification.";
+        }
+        enum srp {
+            value 8938;
+            description
+            "Stream Reservation Protocol.  Hex value of
+            0x22EA.";
+            reference
+            "IEEE 801.1Q-2011.";
+        }
+        enum decnet {
+            value 24579;
+            description
+            "DECnet Phase IV.  Hex value of 0x6003.";
+        }
+        enum rarp {
+            value 32821;
+            description
+            "Reverse Address Resolution Protocol.
+            Hex value 0x8035.";
+            reference
+            "RFC 903: A Reverse Address Resolution Protocol.";
+        }
+        enum appletalk {
+            value 32923;
+            description
+            "Appletalk (Ethertalk).  Hex value of 0x809B.";
+        }
+        enum aarp {
+            value 33011;
+            description
+            "Appletalk Address Resolution Protocol.  Hex value
+            of 0x80F3.";
+        }
+        enum vlan {
+            value 33024;
+            description
+            "VLAN-tagged frame (IEEE 802.1Q) and Shortest Path
+            Bridging IEEE 802.1aq with Network-Network
+            Interface (NNI) compatibility.  Hex value of
+            0x8100.";
+            reference
+            "IEEE 802.1Q.";
+        }
+        enum ipx {
+            value 33079;
+            description
+            "Internetwork Packet Exchange (IPX).  Hex value
+            of 0x8137.";
+        }
+        enum qnx {
+            value 33284;
+            description
+            "QNX Qnet.  Hex value of 0x8204.";
+        }
+        enum ipv6 {
+            value 34525;
+            description
+            "Internet Protocol Version 6 (IPv6).  Hex value
+            of 0x86DD.";
+            reference
+            "RFC 8200: Internet Protocol, Version 6 (IPv6)
+                        Specification
+            RFC 8201: Path MTU Discovery for IP version 6.";
+        }
+        enum efc {
+            value 34824;
+            description
+            "Ethernet flow control using pause frames.
+            Hex value of 0x8808.";
+            reference
+            "IEEE 802.1Qbb.";
+        }
+        enum esp {
+            value 34825;
+            description
+            "Ethernet Slow Protocol.  Hex value of 0x8809.";
+            reference
+            "IEEE 802.3-2015.";
+        }
+        enum cobranet {
+            value 34841;
+            description
+            "CobraNet.  Hex value of 0x8819.";
+        }
+        enum mpls-unicast {
+            value 34887;
+            description
+            "Multiprotocol Label Switching (MPLS) unicast traffic.
+            Hex value of 0x8847.";
+            reference
+            "RFC 3031: Multiprotocol Label Switching Architecture.";
+        }
+        enum mpls-multicast {
+            value 34888;
+            description
+            "MPLS multicast traffic.  Hex value of 0x8848.";
+            reference
+            "RFC 3031: Multiprotocol Label Switching Architecture.";
+        }
+        enum pppoe-discovery {
+            value 34915;
+            description
+            "Point-to-Point Protocol over Ethernet.  Used during
+            the discovery process.  Hex value of 0x8863.";
+            reference
+            "RFC 2516: A Method for Transmitting PPP Over Ethernet
+                        (PPPoE).";
+        }
+        enum pppoe-session {
+            value 34916;
+            description
+            "Point-to-Point Protocol over Ethernet.  Used during
+            session stage.  Hex value of 0x8864.";
+            reference
+            "RFC 2516: A Method for Transmitting PPP Over Ethernet
+                        (PPPoE).";
+        }
+        enum intel-ans {
+            value 34925;
+            description
+            "Intel Advanced Networking Services.  Hex value of
+            0x886D.";
+        }
+        enum jumbo-frames {
+            value 34928;
+            description
+            "Jumbo frames or Ethernet frames with more than
+            1500 bytes of payload, up to 9000 bytes.";
+        }
+        enum homeplug {
+            value 34939;
+            description
+            "Family name for the various power line
+            communications.  Hex value of 0x887B.";
+        }
+        enum eap {
+            value 34958;
+            description
+            "Ethernet Access Protocol (EAP) over LAN.  Hex value
+            of 0x888E.";
+            reference
+            "IEEE 802.1X.";
+        }
+        enum profinet {
+            value 34962;
+            description
+            "PROcess FIeld Net (PROFINET).  Hex value of 0x8892.";
+        }
+        enum hyperscsi {
+            value 34970;
+            description
+            "Small Computer System Interface (SCSI) over Ethernet.
+            Hex value of 0x889A.";
+        }
+        enum aoe {
+            value 34978;
+            description
+            "Advanced Technology Advancement (ATA) over Ethernet.
+            Hex value of 0x88A2.";
+        }
+        enum ethercat {
+            value 34980;
+            description
+            "Ethernet for Control Automation Technology (EtherCAT).
+            Hex value of 0x88A4.";
+        }
+        enum provider-bridging {
+            value 34984;
+            description
+            "Provider Bridging (802.1ad) and Shortest Path Bridging
+            (802.1aq).  Hex value of 0x88A8.";
+            reference
+            "IEEE 802.1ad and IEEE 802.1aq).";
+        }
+        enum ethernet-powerlink {
+            value 34987;
+            description
+            "Ethernet Powerlink.  Hex value of 0x88AB.";
+        }
+        enum goose {
+            value 35000;
+            description
+            "Generic Object Oriented Substation Event (GOOSE).
+            Hex value of 0x88B8.";
+            reference
+            "IEC/ISO 8802-2 and 8802-3.";
+        }
+        enum gse {
+            value 35001;
+            description
+            "Generic Substation Events.  Hex value of 88B9.";
+            reference
+            "IEC 61850.";
+        }
+        enum sv {
+            value 35002;
+            description
+            "Sampled Value Transmission.  Hex value of 0x88BA.";
+            reference
+            "IEC 61850.";
+        }
+        enum lldp {
+            value 35020;
+            description
+            "Link Layer Discovery Protocol (LLDP).  Hex value of
+            0x88CC.";
+            reference
+            "IEEE 802.1AB.";
+        }
+        enum sercos {
+            value 35021;
+            description
+            "Sercos Interface.  Hex value of 0x88CD.";
+        }
+        enum wsmp {
+            value 35036;
+            description
+            "WAVE Short Message Protocol (WSMP).  Hex value of
+            0x88DC.";
+        }
+        enum homeplug-av-mme {
+            value 35041;
+            description
+            "HomePlug AV Mobile Management Entity (MME).  Hex value
+            of 0x88E1.";
+        }
+        enum mrp {
+            value 35043;
+            description
+            "Media Redundancy Protocol (MRP).  Hex value of
+            0x88E3.";
+            reference
+            "IEC 62439-2.";
+        }
+        enum macsec {
+            value 35045;
+            description
+            "MAC Security.  Hex value of 0x88E5.";
+            reference
+            "IEEE 802.1AE.";
+        }
+        enum pbb {
+            value 35047;
+            description
+            "Provider Backbone Bridges (PBB).  Hex value of
+            0x88E7.";
+            reference
+            "IEEE 802.1ah.";
+        }
+        enum cfm {
+            value 35074;
+            description
+            "Connectivity Fault Management (CFM).  Hex value of
+            0x8902.";
+            reference
+            "IEEE 802.1ag.";
+        }
+        enum fcoe {
+            value 35078;
+            description
+            "Fiber Channel over Ethernet (FCoE).  Hex value of
+            0x8906.";
+            reference
+            "T11 FC-BB-5.";
+        }
+        enum fcoe-ip {
+            value 35092;
+            description
+            "FCoE Initialization Protocol.  Hex value of 0x8914.";
+        }
+        enum roce {
+            value 35093;
+            description
+            "RDMA over Converged Ethernet (RoCE).  Hex value of
+            0x8915.";
+        }
+        enum tte {
+            value 35101;
+            description
+            "TTEthernet Protocol Control Frame (TTE).  Hex value
+            of 0x891D.";
+            reference
+            "SAE AS6802.";
+        }
+        enum hsr {
+            value 35119;
+            description
+            "High-availability Seamless Redundancy (HSR).  Hex
+            value of 0x892F.";
+            reference
+            "IEC 62439-3:2016.";
+        }
+        }
+    }
+    description
+        "The uint16 type placeholder is defined to enable
+        users to manage their own ethertypes not
+        covered by the module.  Otherwise, the module contains
+        enum definitions for the more commonly used ethertypes.";
+    }
+}
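
A short sketch connecting this module to the ACL model above: an Ethernet
ACL matches a frame's ethertype either by one of the enum names defined
here or by a raw uint16 for values the module does not cover (ACL and ACE
names are illustrative assumptions):

    <acls xmlns="urn:ietf:params:xml:ns:yang:ietf-access-control-list">
      <acl>
        <name>sample-eth-acl</name>
        <type>eth-acl-type</type>
        <aces>
          <ace>
            <name>drop-ipx</name>
            <matches>
              <eth>
                <!-- enum name 'ipx' (0x8137); the bare number 33079
                     would be equally valid via the uint16 branch -->
                <ethertype>ipx</ethertype>
              </eth>
            </matches>
            <actions>
              <forwarding>drop</forwarding>
            </actions>
          </ace>
        </aces>
      </acl>
    </acls>
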
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-inet-types@2013-07-15.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-inet-types@2013-07-15.yang
new file mode 100644
index 0000000000000000000000000000000000000000..790bafc31dd7dc3582ef1c765fe104145b8a6016
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-inet-types@2013-07-15.yang
@@ -0,0 +1,459 @@
+   module ietf-inet-types {
+
+     namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types";
+     prefix "inet";
+
+     organization
+      "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+     contact
+      "WG Web:   <http://tools.ietf.org/wg/netmod/>
+       WG List:  <mailto:netmod@ietf.org>
+
+       WG Chair: David Kessens
+                 <mailto:david.kessens@nsn.com>
+
+       WG Chair: Juergen Schoenwaelder
+                 <mailto:j.schoenwaelder@jacobs-university.de>
+
+       Editor:   Juergen Schoenwaelder
+                 <mailto:j.schoenwaelder@jacobs-university.de>";
+
+     description
+      "This module contains a collection of generally useful derived
+       YANG data types for Internet addresses and related things.
+
+       Copyright (c) 2013 IETF Trust and the persons identified as
+       authors of the code.  All rights reserved.
+
+       Redistribution and use in source and binary forms, with or
+       without modification, is permitted pursuant to, and subject
+       to the license terms contained in, the Simplified BSD License
+       set forth in Section 4.c of the IETF Trust's Legal Provisions
+       Relating to IETF Documents
+       (http://trustee.ietf.org/license-info).
+
+       This version of this YANG module is part of RFC 6991; see
+       the RFC itself for full legal notices.";
+
+     revision 2013-07-15 {
+       description
+        "This revision adds the following new data types:
+         - ip-address-no-zone
+         - ipv4-address-no-zone
+         - ipv6-address-no-zone";
+       reference
+        "RFC 6991: Common YANG Data Types";
+     }
+
+     revision 2010-09-24 {
+       description
+        "Initial revision.";
+       reference
+        "RFC 6021: Common YANG Data Types";
+     }
+
+     /*** collection of types related to protocol fields ***/
+
+     typedef ip-version {
+       type enumeration {
+         enum unknown {
+           value "0";
+           description
+            "An unknown or unspecified version of the Internet
+             protocol.";
+         }
+         enum ipv4 {
+           value "1";
+           description
+            "The IPv4 protocol as defined in RFC 791.";
+         }
+         enum ipv6 {
+           value "2";
+           description
+            "The IPv6 protocol as defined in RFC 2460.";
+         }
+       }
+       description
+        "This value represents the version of the IP protocol.
+
+         In the value set and its semantics, this type is equivalent
+         to the InetVersion textual convention of the SMIv2.";
+       reference
+        "RFC  791: Internet Protocol
+         RFC 2460: Internet Protocol, Version 6 (IPv6) Specification
+         RFC 4001: Textual Conventions for Internet Network Addresses";
+     }
+
+     typedef dscp {
+       type uint8 {
+         range "0..63";
+       }
+       description
+        "The dscp type represents a Differentiated Services Code Point
+         that may be used for marking packets in a traffic stream.
+
+         In the value set and its semantics, this type is equivalent
+         to the Dscp textual convention of the SMIv2.";
+       reference
+        "RFC 3289: Management Information Base for the Differentiated
+                   Services Architecture
+         RFC 2474: Definition of the Differentiated Services Field
+                   (DS Field) in the IPv4 and IPv6 Headers
+         RFC 2780: IANA Allocation Guidelines For Values In
+                   the Internet Protocol and Related Headers";
+     }
+
+     typedef ipv6-flow-label {
+       type uint32 {
+         range "0..1048575";
+       }
+       description
+        "The ipv6-flow-label type represents the flow identifier or Flow
+         Label in an IPv6 packet header that may be used to
+         discriminate traffic flows.
+
+         In the value set and its semantics, this type is equivalent
+         to the IPv6FlowLabel textual convention of the SMIv2.";
+       reference
+        "RFC 3595: Textual Conventions for IPv6 Flow Label
+         RFC 2460: Internet Protocol, Version 6 (IPv6) Specification";
+     }
+
+     typedef port-number {
+       type uint16 {
+         range "0..65535";
+       }
+       description
+        "The port-number type represents a 16-bit port number of an
+         Internet transport-layer protocol such as UDP, TCP, DCCP, or
+         SCTP.  Port numbers are assigned by IANA.  A current list of
+         all assignments is available from <http://www.iana.org/>.
+
+         Note that the port number value zero is reserved by IANA.  In
+         situations where the value zero does not make sense, it can
+         be excluded by subtyping the port-number type.
+         In the value set and its semantics, this type is equivalent
+         to the InetPortNumber textual convention of the SMIv2.";
+       reference
+        "RFC  768: User Datagram Protocol
+         RFC  793: Transmission Control Protocol
+         RFC 4960: Stream Control Transmission Protocol
+         RFC 4340: Datagram Congestion Control Protocol (DCCP)
+         RFC 4001: Textual Conventions for Internet Network Addresses";
+     }
+
+     /*** collection of types related to autonomous systems ***/
+
+     typedef as-number {
+       type uint32;
+       description
+        "The as-number type represents autonomous system numbers
+         which identify an Autonomous System (AS).  An AS is a set
+         of routers under a single technical administration, using
+         an interior gateway protocol and common metrics to route
+         packets within the AS, and using an exterior gateway
+         protocol to route packets to other ASes.  IANA maintains
+         the AS number space and has delegated large parts to the
+         regional registries.
+
+         Autonomous system numbers were originally limited to 16
+         bits.  BGP extensions have enlarged the autonomous system
+         number space to 32 bits.  This type therefore uses an uint32
+         base type without a range restriction in order to support
+         a larger autonomous system number space.
+
+         In the value set and its semantics, this type is equivalent
+         to the InetAutonomousSystemNumber textual convention of
+         the SMIv2.";
+       reference
+        "RFC 1930: Guidelines for creation, selection, and registration
+                   of an Autonomous System (AS)
+         RFC 4271: A Border Gateway Protocol 4 (BGP-4)
+         RFC 4001: Textual Conventions for Internet Network Addresses
+         RFC 6793: BGP Support for Four-Octet Autonomous System (AS)
+                   Number Space";
+     }
+
+     /*** collection of types related to IP addresses and hostnames ***/
+
+     typedef ip-address {
+       type union {
+         type inet:ipv4-address;
+         type inet:ipv6-address;
+       }
+       description
+        "The ip-address type represents an IP address and is IP
+         version neutral.  The format of the textual representation
+         implies the IP version.  This type supports scoped addresses
+         by allowing zone identifiers in the address format.";
+       reference
+        "RFC 4007: IPv6 Scoped Address Architecture";
+     }
+
+     typedef ipv4-address {
+       type string {
+         pattern
+           '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+         +  '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+         + '(%[\p{N}\p{L}]+)?';
+       }
+       description
+         "The ipv4-address type represents an IPv4 address in
+          dotted-quad notation.  The IPv4 address may include a zone
+          index, separated by a % sign.
+
+          The zone index is used to disambiguate identical address
+          values.  For link-local addresses, the zone index will
+          typically be the interface index number or the name of an
+          interface.  If the zone index is not present, the default
+          zone of the device will be used.
+
+          The canonical format for the zone index is the numerical
+          format";
+     }
+
+     typedef ipv6-address {
+       type string {
+         pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+               + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+               + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+               + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+               + '(%[\p{N}\p{L}]+)?';
+         pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+               + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+               + '(%.+)?';
+       }
+       description
+        "The ipv6-address type represents an IPv6 address in full,
+         mixed, shortened, and shortened-mixed notation.  The IPv6
+         address may include a zone index, separated by a % sign.
+
+         The zone index is used to disambiguate identical address
+         values.  For link-local addresses, the zone index will
+         typically be the interface index number or the name of an
+         interface.  If the zone index is not present, the default
+         zone of the device will be used.
+
+         The canonical format of IPv6 addresses uses the textual
+         representation defined in Section 4 of RFC 5952.  The
+         canonical format for the zone index is the numerical
+         format as described in Section 11.2 of RFC 4007.";
+       reference
+        "RFC 4291: IP Version 6 Addressing Architecture
+         RFC 4007: IPv6 Scoped Address Architecture
+         RFC 5952: A Recommendation for IPv6 Address Text
+                   Representation";
+     }
+
+     typedef ip-address-no-zone {
+       type union {
+         type inet:ipv4-address-no-zone;
+         type inet:ipv6-address-no-zone;
+       }
+       description
+        "The ip-address-no-zone type represents an IP address and is
+         IP version neutral.  The format of the textual representation
+         implies the IP version.  This type does not support scoped
+         addresses since it does not allow zone identifiers in the
+         address format.";
+       reference
+        "RFC 4007: IPv6 Scoped Address Architecture";
+     }
+
+     typedef ipv4-address-no-zone {
+       type inet:ipv4-address {
+         pattern '[0-9\.]*';
+       }
+       description
+         "An IPv4 address without a zone index.  This type, derived from
+          ipv4-address, may be used in situations where the zone is
+          known from the context and hence no zone index is needed.";
+     }
+
+     typedef ipv6-address-no-zone {
+       type inet:ipv6-address {
+         pattern '[0-9a-fA-F:\.]*';
+       }
+       description
+         "An IPv6 address without a zone index.  This type, derived from
+          ipv6-address, may be used in situations where the zone is
+          known from the context and hence no zone index is needed.";
+       reference
+        "RFC 4291: IP Version 6 Addressing Architecture
+         RFC 4007: IPv6 Scoped Address Architecture
+         RFC 5952: A Recommendation for IPv6 Address Text
+                   Representation";
+     }
+
+     typedef ip-prefix {
+       type union {
+         type inet:ipv4-prefix;
+         type inet:ipv6-prefix;
+       }
+       description
+        "The ip-prefix type represents an IP prefix and is IP
+         version neutral.  The format of the textual representations
+         implies the IP version.";
+     }
+
+     typedef ipv4-prefix {
+       type string {
+         pattern
+            '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+          +  '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+          + '/(([0-9])|([1-2][0-9])|(3[0-2]))';
+       }
+       description
+        "The ipv4-prefix type represents an IPv4 address prefix.
+         The prefix length is given by the number following the
+         slash character and must be less than or equal to 32.
+
+         A prefix length value of n corresponds to an IP address
+         mask that has n contiguous 1-bits from the most
+         significant bit (MSB) and all other bits set to 0.
+
+         The canonical format of an IPv4 prefix has all bits of
+         the IPv4 address set to zero that are not part of the
+         IPv4 prefix.";
+     }
+
+     typedef ipv6-prefix {
+       type string {
+         pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+               + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+               + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+               + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+               + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))';
+         pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+               + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+               + '(/.+)';
+       }
+       description
+        "The ipv6-prefix type represents an IPv6 address prefix.
+         The prefix length is given by the number following the
+         slash character and must be less than or equal to 128.
+
+         A prefix length value of n corresponds to an IP address
+         mask that has n contiguous 1-bits from the most
+         significant bit (MSB) and all other bits set to 0.
+
+         The IPv6 address should have all bits that do not belong
+         to the prefix set to zero.
+
+         The canonical format of an IPv6 prefix has all bits of
+         the IPv6 address set to zero that are not part of the
+         IPv6 prefix.  Furthermore, the IPv6 address is represented
+         as defined in Section 4 of RFC 5952.";
+       reference
+        "RFC 5952: A Recommendation for IPv6 Address Text
+                   Representation";
+     }
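+
+     // Editor's note (illustrative, not part of RFC 6991):
+     // "2001:0DB8:0:0:0:0:0:0/32" matches the patterns above, but
+     // its canonical form is "2001:db8::/32": host bits are zeroed
+     // and the address is written per RFC 5952 (lowercase, longest
+     // run of zeros compressed).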
+
+     /*** collection of domain name and URI types ***/
+
+     typedef domain-name {
+       type string {
+         pattern
+           '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*'
+         + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)'
+         + '|\.';
+         length "1..253";
+       }
+       description
+        "The domain-name type represents a DNS domain name.  The
+         name SHOULD be fully qualified whenever possible.
+
+         Internet domain names are only loosely specified.  Section
+         3.5 of RFC 1034 recommends a syntax (modified in Section
+         2.1 of RFC 1123).  The pattern above is intended to allow
+         for current practice in domain name use, and some possible
+         future expansion.  It is designed to hold various types of
+         domain names, including names used for A or AAAA records
+         (host names) and other records, such as SRV records.  Note
+         that Internet host names have a stricter syntax (described
+         in RFC 952) than the DNS recommendations in RFCs 1034 and
+         1123, and that systems that want to store host names in
+         schema nodes using the domain-name type are recommended to
+         adhere to this stricter standard to ensure interoperability.
+
+         The encoding of DNS names in the DNS protocol is limited
+         to 255 characters.  Since the encoding consists of labels
+         prefixed by a length byte and there is a trailing NULL
+         byte, only 253 characters can appear in the textual dotted
+         notation.
+
+         The description clause of schema nodes using the domain-name
+         type MUST describe when and how these names are resolved to
+         IP addresses.  Note that the resolution of a domain-name value
+         may require querying multiple DNS records (e.g., A for IPv4
+         and AAAA for IPv6).  The order of the resolution process and
+         which DNS record takes precedence can either be defined
+         explicitly or may depend on the configuration of the
+         resolver.
+
+         Domain-name values use the US-ASCII encoding.  Their canonical
+         format uses lowercase US-ASCII characters.  Internationalized
+         domain names MUST be A-labels as per RFC 5890.";
+       reference
+        "RFC  952: DoD Internet Host Table Specification
+         RFC 1034: Domain Names - Concepts and Facilities
+         RFC 1123: Requirements for Internet Hosts -- Application
+                   and Support
+         RFC 2782: A DNS RR for specifying the location of services
+                   (DNS SRV)
+         RFC 5890: Internationalized Domain Names in Applications
+                   (IDNA): Definitions and Document Framework";
+     }
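+
+     // Editor's note (illustrative, not part of RFC 6991): on the
+     // wire each dot becomes a label-length octet, and one leading
+     // length octet plus a trailing zero octet are added, so a name
+     // of N text characters encodes in N + 2 octets; 255 - 2 = 253.
+     // Canonically, "Example.COM" is written "example.com".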
+
+     typedef host {
+       type union {
+         type inet:ip-address;
+         type inet:domain-name;
+       }
+       description
+        "The host type represents either an IP address or a DNS
+         domain name.";
+     }
+
+     typedef uri {
+       type string;
+       description
+        "The uri type represents a Uniform Resource Identifier
+         (URI) as defined by STD 66.
+
+         Objects using the uri type MUST be in US-ASCII encoding,
+         and MUST be normalized as described by RFC 3986 Sections
+         6.2.1, 6.2.2.1, and 6.2.2.2.  All unnecessary
+         percent-encoding is removed, and all case-insensitive
+         characters are set to lowercase except for hexadecimal
+         digits, which are normalized to uppercase as described in
+         Section 6.2.2.1.
+
+         The purpose of this normalization is to help provide
+         unique URIs.  Note that this normalization is not
+         sufficient to provide uniqueness.  Two URIs that are
+         textually distinct after this normalization may still be
+         equivalent.
+
+         Objects using the uri type may restrict the schemes that
+         they permit.  For example, 'data:' and 'urn:' schemes
+         might not be appropriate.
+
+         A zero-length URI is not a valid URI.  This can be used to
+         express 'URI absent' where required.
+
+         In the value set and its semantics, this type is equivalent
+         to the Uri SMIv2 textual convention defined in RFC 5017.";
+       reference
+        "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax
+         RFC 3305: Report from the Joint W3C/IETF URI Planning Interest
+                   Group: Uniform Resource Identifiers (URIs), URLs,
+                   and Uniform Resource Names (URNs): Clarifications
+                   and Recommendations
+         RFC 5017: MIB Textual Conventions for Uniform Resource
+                   Identifiers (URIs)";
+     }
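+
+     // Editor's note (illustrative, not part of RFC 6991):
+     // "HTTP://Example.COM/%7euser" normalizes to
+     // "http://example.com/~user": the scheme and host are
+     // lowercased, and the unnecessary percent-encoding of the
+     // unreserved character '~' is removed.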
+
+   }
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-interfaces@2018-02-20.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-interfaces@2018-02-20.yang
new file mode 100644
index 0000000000000000000000000000000000000000..e53675b9d3caab79e15e1d7453d118df8c177089
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-interfaces@2018-02-20.yang
@@ -0,0 +1,1123 @@
+module ietf-interfaces {
+  yang-version 1.1;
+  namespace "urn:ietf:params:xml:ns:yang:ietf-interfaces";
+  prefix if;
+
+  import ietf-yang-types {
+    prefix yang;
+  }
+
+  organization
+    "IETF NETMOD (Network Modeling) Working Group";
+
+  contact
+    "WG Web:   <https://datatracker.ietf.org/wg/netmod/>
+     WG List:  <mailto:netmod@ietf.org>
+
+     Editor:   Martin Bjorklund
+               <mailto:mbj@tail-f.com>";
+
+  description
+    "This module contains a collection of YANG definitions for
+     managing network interfaces.
+
+     Copyright (c) 2018 IETF Trust and the persons identified as
+     authors of the code.  All rights reserved.
+
+     Redistribution and use in source and binary forms, with or
+     without modification, is permitted pursuant to, and subject
+     to the license terms contained in, the Simplified BSD License
+     set forth in Section 4.c of the IETF Trust's Legal Provisions
+     Relating to IETF Documents
+     (https://trustee.ietf.org/license-info).
+
+     This version of this YANG module is part of RFC 8343; see
+     the RFC itself for full legal notices.";
+
+  revision 2018-02-20 {
+    description
+      "Updated to support NMDA.";
+    reference
+      "RFC 8343: A YANG Data Model for Interface Management";
+  }
+
+  revision 2014-05-08 {
+    description
+      "Initial revision.";
+    reference
+      "RFC 7223: A YANG Data Model for Interface Management";
+  }
+
+  /*
+   * Typedefs
+   */
+
+  typedef interface-ref {
+    type leafref {
+      path "/if:interfaces/if:interface/if:name";
+    }
+    description
+      "This type is used by data models that need to reference
+       interfaces.";
+  }
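+
+  // Editor's example (illustrative, not part of RFC 8343): a module
+  // importing this one with prefix "if" could reference an interface
+  // with a hypothetical leaf such as:
+  //
+  //   leaf outgoing-interface { type if:interface-ref; }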
+
+  /*
+   * Identities
+   */
+
+  identity interface-type {
+    description
+      "Base identity from which specific interface types are
+       derived.";
+  }
+
+  /*
+   * Features
+   */
+
+  feature arbitrary-names {
+    description
+      "This feature indicates that the device allows user-controlled
+       interfaces to be named arbitrarily.";
+  }
+  feature pre-provisioning {
+    description
+      "This feature indicates that the device supports
+       pre-provisioning of interface configuration, i.e., it is
+       possible to configure an interface whose physical interface
+       hardware is not present on the device.";
+  }
+  feature if-mib {
+    description
+      "This feature indicates that the device implements
+       the IF-MIB.";
+    reference
+      "RFC 2863: The Interfaces Group MIB";
+  }
+
+  /*
+   * Data nodes
+   */
+
+  container interfaces {
+    description
+      "Interface parameters.";
+
+    list interface {
+      key "name";
+
+      description
+        "The list of interfaces on the device.
+
+         The status of an interface is available in this list in the
+         operational state.  If the configuration of a
+         system-controlled interface cannot be used by the system
+         (e.g., the interface hardware present does not match the
+         interface type), then the configuration is not applied to
+         the system-controlled interface shown in the operational
+         state.  If the configuration of a user-controlled interface
+         cannot be used by the system, the configured interface is
+         not instantiated in the operational state.
+
+         System-controlled interfaces created by the system are
+         always present in this list in the operational state,
+         whether or not they are configured.";
+
+      leaf name {
+        type string;
+        description
+          "The name of the interface.
+
+           A device MAY restrict the allowed values for this leaf,
+           possibly depending on the type of the interface.
+           For system-controlled interfaces, this leaf is the
+           device-specific name of the interface.
+
+           If a client tries to create configuration for a
+           system-controlled interface that is not present in the
+           operational state, the server MAY reject the request if
+           the implementation does not support pre-provisioning of
+           interfaces or if the name refers to an interface that can
+           never exist in the system.  A Network Configuration
+           Protocol (NETCONF) server MUST reply with an rpc-error
+           with the error-tag 'invalid-value' in this case.
+
+           If the device supports pre-provisioning of interface
+           configuration, the 'pre-provisioning' feature is
+           advertised.
+
+           If the device allows arbitrarily named user-controlled
+           interfaces, the 'arbitrary-names' feature is advertised.
+
+           When a configured user-controlled interface is created by
+           the system, it is instantiated with the same name in the
+           operational state.
+
+           A server implementation MAY map this leaf to the ifName
+           MIB object.  Such an implementation needs to use some
+           mechanism to handle the differences in size and characters
+           allowed between this leaf and ifName.  The definition of
+           such a mechanism is outside the scope of this document.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifName";
+      }
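+
+      // Editor's example (illustrative, not part of RFC 8343): a
+      // NETCONF server rejecting such a request would reply with an
+      // RFC 6241 error such as:
+      //
+      //   <rpc-error>
+      //     <error-type>application</error-type>
+      //     <error-tag>invalid-value</error-tag>
+      //     <error-severity>error</error-severity>
+      //   </rpc-error>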
+
+      leaf description {
+        type string;
+        description
+          "A textual description of the interface.
+
+           A server implementation MAY map this leaf to the ifAlias
+           MIB object.  Such an implementation needs to use some
+           mechanism to handle the differences in size and characters
+           allowed between this leaf and ifAlias.  The definition of
+           such a mechanism is outside the scope of this document.
+
+           Since ifAlias is defined to be stored in non-volatile
+           storage, the MIB implementation MUST map ifAlias to the
+           value of 'description' in the persistently stored
+           configuration.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifAlias";
+      }
+
+      leaf type {
+        type identityref {
+          base interface-type;
+        }
+        mandatory true;
+        description
+          "The type of the interface.
+
+           When an interface entry is created, a server MAY
+           initialize the type leaf with a valid value, e.g., if it
+           is possible to derive the type from the name of the
+           interface.
+
+           If a client tries to set the type of an interface to a
+           value that can never be used by the system, e.g., if the
+           type is not supported or if the type does not match the
+           name of the interface, the server MUST reject the request.
+           A NETCONF server MUST reply with an rpc-error with the
+           error-tag 'invalid-value' in this case.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifType";
+      }
+
+      leaf enabled {
+        type boolean;
+        default "true";
+        description
+          "This leaf contains the configured, desired state of the
+           interface.
+
+           Systems that implement the IF-MIB use the value of this
+           leaf in the intended configuration to set
+           IF-MIB.ifAdminStatus to 'up' or 'down' after an ifEntry
+           has been initialized, as described in RFC 2863.
+
+           Changes in this leaf in the intended configuration are
+           reflected in ifAdminStatus.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifAdminStatus";
+      }
+
+      leaf link-up-down-trap-enable {
+        if-feature if-mib;
+        type enumeration {
+          enum enabled {
+            value 1;
+            description
+              "The device will generate linkUp/linkDown SNMP
+               notifications for this interface.";
+          }
+          enum disabled {
+            value 2;
+            description
+              "The device will not generate linkUp/linkDown SNMP
+               notifications for this interface.";
+          }
+        }
+        description
+          "Controls whether linkUp/linkDown SNMP notifications
+           should be generated for this interface.
+
+           If this node is not configured, the value 'enabled' is
+           operationally used by the server for interfaces that do
+           not operate on top of any other interface (i.e., there are
+           no 'lower-layer-if' entries), and 'disabled' otherwise.";
+        reference
+          "RFC 2863: The Interfaces Group MIB -
+                     ifLinkUpDownTrapEnable";
+      }
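+
+      // Editor's note (illustrative, not part of RFC 8343): e.g., a
+      // physical port with no 'lower-layer-if' entries defaults to
+      // 'enabled', while a VLAN sub-interface stacked on top of it
+      // defaults to 'disabled', unless configured otherwise.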
+
+      leaf admin-status {
+        if-feature if-mib;
+        type enumeration {
+          enum up {
+            value 1;
+            description
+              "Ready to pass packets.";
+          }
+          enum down {
+            value 2;
+            description
+              "Not ready to pass packets and not in some test mode.";
+          }
+          enum testing {
+            value 3;
+            description
+              "In some test mode.";
+          }
+        }
+        config false;
+        mandatory true;
+        description
+          "The desired state of the interface.
+
+           This leaf has the same read semantics as ifAdminStatus.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifAdminStatus";
+      }
+
+      leaf oper-status {
+        type enumeration {
+          enum up {
+            value 1;
+            description
+              "Ready to pass packets.";
+          }
+          enum down {
+            value 2;
+            description
+              "The interface does not pass any packets.";
+          }
+          enum testing {
+            value 3;
+            description
+              "In some test mode.  No operational packets can
+               be passed.";
+          }
+          enum unknown {
+            value 4;
+            description
+              "Status cannot be determined for some reason.";
+          }
+          enum dormant {
+            value 5;
+            description
+              "Waiting for some external event.";
+          }
+          enum not-present {
+            value 6;
+            description
+              "Some component (typically hardware) is missing.";
+          }
+          enum lower-layer-down {
+            value 7;
+            description
+              "Down due to state of lower-layer interface(s).";
+          }
+        }
+        config false;
+        mandatory true;
+        description
+          "The current operational state of the interface.
+
+           This leaf has the same semantics as ifOperStatus.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifOperStatus";
+      }
+
+      leaf last-change {
+        type yang:date-and-time;
+        config false;
+        description
+          "The time the interface entered its current operational
+           state.  If the current state was entered prior to the
+           last re-initialization of the local network management
+           subsystem, then this node is not present.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifLastChange";
+      }
+
+      leaf if-index {
+        if-feature if-mib;
+        type int32 {
+          range "1..2147483647";
+        }
+        config false;
+        mandatory true;
+        description
+          "The ifIndex value for the ifEntry represented by this
+           interface.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifIndex";
+      }
+
+      leaf phys-address {
+        type yang:phys-address;
+        config false;
+        description
+          "The interface's address at its protocol sub-layer.  For
+           example, for an 802.x interface, this object normally
+           contains a Media Access Control (MAC) address.  The
+           interface's media-specific modules must define the bit
+           and byte ordering and the format of the value of this
+           object.  For interfaces that do not have such an address
+           (e.g., a serial line), this node is not present.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifPhysAddress";
+      }
+
+      leaf-list higher-layer-if {
+        type interface-ref;
+        config false;
+        description
+          "A list of references to interfaces layered on top of this
+           interface.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifStackTable";
+      }
+
+      leaf-list lower-layer-if {
+        type interface-ref;
+        config false;
+        description
+          "A list of references to interfaces layered underneath this
+           interface.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifStackTable";
+      }
+
+      leaf speed {
+        type yang:gauge64;
+        units "bits/second";
+        config false;
+        description
+            "An estimate of the interface's current bandwidth in bits
+             per second.  For interfaces that do not vary in
+             bandwidth or for those where no accurate estimation can
+             be made, this node should contain the nominal bandwidth.
+             For interfaces that have no concept of bandwidth, this
+             node is not present.";
+        reference
+          "RFC 2863: The Interfaces Group MIB -
+                     ifSpeed, ifHighSpeed";
+      }
+
+      container statistics {
+        config false;
+        description
+          "A collection of interface-related statistics objects.";
+
+        leaf discontinuity-time {
+          type yang:date-and-time;
+          mandatory true;
+          description
+            "The time on the most recent occasion at which any one or
+             more of this interface's counters suffered a
+             discontinuity.  If no such discontinuities have occurred
+             since the last re-initialization of the local management
+             subsystem, then this node contains the time the local
+             management subsystem re-initialized itself.";
+        }
+
+        leaf in-octets {
+          type yang:counter64;
+          description
+            "The total number of octets received on the interface,
+             including framing characters.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifHCInOctets";
+        }
+
+        leaf in-unicast-pkts {
+          type yang:counter64;
+          description
+            "The number of packets, delivered by this sub-layer to a
+             higher (sub-)layer, that were not addressed to a
+             multicast or broadcast address at this sub-layer.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts";
+        }
+
+        leaf in-broadcast-pkts {
+          type yang:counter64;
+          description
+            "The number of packets, delivered by this sub-layer to a
+             higher (sub-)layer, that were addressed to a broadcast
+             address at this sub-layer.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB -
+                       ifHCInBroadcastPkts";
+        }
+
+        leaf in-multicast-pkts {
+          type yang:counter64;
+          description
+            "The number of packets, delivered by this sub-layer to a
+             higher (sub-)layer, that were addressed to a multicast
+             address at this sub-layer.  For a MAC-layer protocol,
+             this includes both Group and Functional addresses.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB -
+                       ifHCInMulticastPkts";
+        }
+
+        leaf in-discards {
+          type yang:counter32;
+          description
+            "The number of inbound packets that were chosen to be
+             discarded even though no errors had been detected to
+             prevent their being deliverable to a higher-layer
+             protocol.  One possible reason for discarding such a
+             packet could be to free up buffer space.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifInDiscards";
+        }
+
+        leaf in-errors {
+          type yang:counter32;
+          description
+            "For packet-oriented interfaces, the number of inbound
+             packets that contained errors preventing them from being
+             deliverable to a higher-layer protocol.  For character-
+             oriented or fixed-length interfaces, the number of
+             inbound transmission units that contained errors
+             preventing them from being deliverable to a higher-layer
+             protocol.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifInErrors";
+        }
+
+        leaf in-unknown-protos {
+          type yang:counter32;
+          description
+            "For packet-oriented interfaces, the number of packets
+             received via the interface that were discarded because
+             of an unknown or unsupported protocol.  For
+             character-oriented or fixed-length interfaces that
+             support protocol multiplexing, the number of
+             transmission units received via the interface that were
+             discarded because of an unknown or unsupported protocol.
+             For any interface that does not support protocol
+             multiplexing, this counter is not present.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos";
+        }
+
+        leaf out-octets {
+          type yang:counter64;
+          description
+            "The total number of octets transmitted out of the
+             interface, including framing characters.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifHCOutOctets";
+        }
+
+        leaf out-unicast-pkts {
+          type yang:counter64;
+          description
+            "The total number of packets that higher-level protocols
+             requested be transmitted and that were not addressed
+             to a multicast or broadcast address at this sub-layer,
+             including those that were discarded or not sent.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts";
+        }
+
+        leaf out-broadcast-pkts {
+          type yang:counter64;
+          description
+            "The total number of packets that higher-level protocols
+             requested be transmitted and that were addressed to a
+             broadcast address at this sub-layer, including those
+             that were discarded or not sent.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB -
+                       ifHCOutBroadcastPkts";
+        }
+
+        leaf out-multicast-pkts {
+          type yang:counter64;
+          description
+            "The total number of packets that higher-level protocols
+             requested be transmitted and that were addressed to a
+             multicast address at this sub-layer, including those
+             that were discarded or not sent.  For a MAC-layer
+             protocol, this includes both Group and Functional
+             addresses.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB -
+                       ifHCOutMulticastPkts";
+        }
+
+        leaf out-discards {
+          type yang:counter32;
+          description
+            "The number of outbound packets that were chosen to be
+             discarded even though no errors had been detected to
+             prevent their being transmitted.  One possible reason
+             for discarding such a packet could be to free up buffer
+             space.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifOutDiscards";
+        }
+
+        leaf out-errors {
+          type yang:counter32;
+          description
+            "For packet-oriented interfaces, the number of outbound
+             packets that could not be transmitted because of errors.
+             For character-oriented or fixed-length interfaces, the
+             number of outbound transmission units that could not be
+             transmitted because of errors.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifOutErrors";
+        }
+      }
+
+    }
+  }
+
+  /*
+   * Legacy typedefs
+   */
+
+  typedef interface-state-ref {
+    type leafref {
+      path "/if:interfaces-state/if:interface/if:name";
+    }
+    status deprecated;
+    description
+      "This type is used by data models that need to reference
+       the operationally present interfaces.";
+  }
+
+  /*
+   * Legacy operational state data nodes
+   */
+
+  container interfaces-state {
+    config false;
+    status deprecated;
+    description
+      "Data nodes for the operational state of interfaces.";
+
+    list interface {
+      key "name";
+      status deprecated;
+
+      description
+        "The list of interfaces on the device.
+
+         System-controlled interfaces created by the system are
+         always present in this list, whether or not they are
+         configured.";
+
+      leaf name {
+        type string;
+        status deprecated;
+        description
+          "The name of the interface.
+
+           A server implementation MAY map this leaf to the ifName
+           MIB object.  Such an implementation needs to use some
+           mechanism to handle the differences in size and characters
+           allowed between this leaf and ifName.  The definition of
+           such a mechanism is outside the scope of this document.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifName";
+      }
+
+      leaf type {
+        type identityref {
+          base interface-type;
+        }
+        mandatory true;
+        status deprecated;
+        description
+          "The type of the interface.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifType";
+      }
+
+      leaf admin-status {
+        if-feature if-mib;
+        type enumeration {
+          enum up {
+            value 1;
+            description
+              "Ready to pass packets.";
+          }
+          enum down {
+            value 2;
+            description
+              "Not ready to pass packets and not in some test mode.";
+          }
+          enum testing {
+            value 3;
+            description
+              "In some test mode.";
+          }
+        }
+        mandatory true;
+        status deprecated;
+        description
+          "The desired state of the interface.
+
+           This leaf has the same read semantics as ifAdminStatus.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifAdminStatus";
+      }
+
+      leaf oper-status {
+        type enumeration {
+          enum up {
+            value 1;
+            description
+              "Ready to pass packets.";
+          }
+          enum down {
+            value 2;
+            description
+              "The interface does not pass any packets.";
+          }
+          enum testing {
+            value 3;
+            description
+              "In some test mode.  No operational packets can
+               be passed.";
+          }
+          enum unknown {
+            value 4;
+            description
+              "Status cannot be determined for some reason.";
+          }
+          enum dormant {
+            value 5;
+            description
+              "Waiting for some external event.";
+          }
+          enum not-present {
+            value 6;
+            description
+              "Some component (typically hardware) is missing.";
+          }
+          enum lower-layer-down {
+            value 7;
+            description
+              "Down due to state of lower-layer interface(s).";
+          }
+        }
+        mandatory true;
+        status deprecated;
+        description
+          "The current operational state of the interface.
+
+           This leaf has the same semantics as ifOperStatus.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifOperStatus";
+      }
+
+      leaf last-change {
+        type yang:date-and-time;
+        status deprecated;
+        description
+          "The time the interface entered its current operational
+           state.  If the current state was entered prior to the
+           last re-initialization of the local network management
+           subsystem, then this node is not present.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifLastChange";
+      }
+
+      leaf if-index {
+        if-feature if-mib;
+        type int32 {
+          range "1..2147483647";
+        }
+        mandatory true;
+        status deprecated;
+        description
+          "The ifIndex value for the ifEntry represented by this
+           interface.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifIndex";
+      }
+
+      leaf phys-address {
+        type yang:phys-address;
+        status deprecated;
+        description
+          "The interface's address at its protocol sub-layer.  For
+           example, for an 802.x interface, this object normally
+           contains a Media Access Control (MAC) address.  The
+           interface's media-specific modules must define the bit
+           and byte ordering and the format of the value of this
+           object.  For interfaces that do not have such an address
+           (e.g., a serial line), this node is not present.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifPhysAddress";
+      }
+
+      leaf-list higher-layer-if {
+        type interface-state-ref;
+        status deprecated;
+        description
+          "A list of references to interfaces layered on top of this
+           interface.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifStackTable";
+      }
+
+      leaf-list lower-layer-if {
+        type interface-state-ref;
+        status deprecated;
+        description
+          "A list of references to interfaces layered underneath this
+           interface.";
+        reference
+          "RFC 2863: The Interfaces Group MIB - ifStackTable";
+      }
+
+      leaf speed {
+        type yang:gauge64;
+        units "bits/second";
+        status deprecated;
+        description
+            "An estimate of the interface's current bandwidth in bits
+             per second.  For interfaces that do not vary in
+             bandwidth or for those where no accurate estimation can
+             be made, this node should contain the nominal bandwidth.
+             For interfaces that have no concept of bandwidth, this
+             node is not present.";
+        reference
+          "RFC 2863: The Interfaces Group MIB -
+                     ifSpeed, ifHighSpeed";
+      }
+
+      container statistics {
+        status deprecated;
+        description
+          "A collection of interface-related statistics objects.";
+
+        leaf discontinuity-time {
+          type yang:date-and-time;
+          mandatory true;
+          status deprecated;
+          description
+            "The time on the most recent occasion at which any one or
+             more of this interface's counters suffered a
+             discontinuity.  If no such discontinuities have occurred
+             since the last re-initialization of the local management
+             subsystem, then this node contains the time the local
+             management subsystem re-initialized itself.";
+        }
+
+        leaf in-octets {
+          type yang:counter64;
+          status deprecated;
+          description
+            "The total number of octets received on the interface,
+             including framing characters.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifHCInOctets";
+        }
+
+        leaf in-unicast-pkts {
+          type yang:counter64;
+          status deprecated;
+          description
+            "The number of packets, delivered by this sub-layer to a
+             higher (sub-)layer, that were not addressed to a
+             multicast or broadcast address at this sub-layer.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts";
+        }
+
+        leaf in-broadcast-pkts {
+          type yang:counter64;
+          status deprecated;
+          description
+            "The number of packets, delivered by this sub-layer to a
+             higher (sub-)layer, that were addressed to a broadcast
+             address at this sub-layer.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB -
+                       ifHCInBroadcastPkts";
+        }
+
+        leaf in-multicast-pkts {
+          type yang:counter64;
+          status deprecated;
+          description
+            "The number of packets, delivered by this sub-layer to a
+             higher (sub-)layer, that were addressed to a multicast
+             address at this sub-layer.  For a MAC-layer protocol,
+             this includes both Group and Functional addresses.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB -
+                       ifHCInMulticastPkts";
+        }
+
+        leaf in-discards {
+          type yang:counter32;
+          status deprecated;
+          description
+            "The number of inbound packets that were chosen to be
+             discarded even though no errors had been detected to
+             prevent their being deliverable to a higher-layer
+             protocol.  One possible reason for discarding such a
+             packet could be to free up buffer space.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifInDiscards";
+        }
+
+        leaf in-errors {
+          type yang:counter32;
+          status deprecated;
+          description
+            "For packet-oriented interfaces, the number of inbound
+             packets that contained errors preventing them from being
+             deliverable to a higher-layer protocol.  For character-
+             oriented or fixed-length interfaces, the number of
+             inbound transmission units that contained errors
+             preventing them from being deliverable to a higher-layer
+             protocol.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifInErrors";
+        }
+
+        leaf in-unknown-protos {
+          type yang:counter32;
+          status deprecated;
+          description
+            "For packet-oriented interfaces, the number of packets
+             received via the interface that were discarded because
+             of an unknown or unsupported protocol.  For
+             character-oriented or fixed-length interfaces that
+             support protocol multiplexing, the number of
+             transmission units received via the interface that were
+             discarded because of an unknown or unsupported protocol.
+             For any interface that does not support protocol
+             multiplexing, this counter is not present.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos";
+        }
+
+        leaf out-octets {
+          type yang:counter64;
+          status deprecated;
+          description
+            "The total number of octets transmitted out of the
+             interface, including framing characters.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifHCOutOctets";
+        }
+
+        leaf out-unicast-pkts {
+          type yang:counter64;
+          status deprecated;
+          description
+            "The total number of packets that higher-level protocols
+             requested be transmitted and that were not addressed
+             to a multicast or broadcast address at this sub-layer,
+             including those that were discarded or not sent.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts";
+        }
+
+        leaf out-broadcast-pkts {
+          type yang:counter64;
+          status deprecated;
+          description
+            "The total number of packets that higher-level protocols
+             requested be transmitted and that were addressed to a
+             broadcast address at this sub-layer, including those
+             that were discarded or not sent.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB -
+                       ifHCOutBroadcastPkts";
+        }
+
+        leaf out-multicast-pkts {
+          type yang:counter64;
+          status deprecated;
+          description
+            "The total number of packets that higher-level protocols
+             requested be transmitted and that were addressed to a
+             multicast address at this sub-layer, including those
+             that were discarded or not sent.  For a MAC-layer
+             protocol, this includes both Group and Functional
+             addresses.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB -
+                       ifHCOutMulticastPkts";
+        }
+
+        leaf out-discards {
+          type yang:counter32;
+          status deprecated;
+          description
+            "The number of outbound packets that were chosen to be
+             discarded even though no errors had been detected to
+             prevent their being transmitted.  One possible reason
+             for discarding such a packet could be to free up buffer
+             space.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifOutDiscards";
+        }
+
+        leaf out-errors {
+          type yang:counter32;
+          status deprecated;
+          description
+            "For packet-oriented interfaces, the number of outbound
+             packets that could not be transmitted because of errors.
+             For character-oriented or fixed-length interfaces, the
+             number of outbound transmission units that could not be
+             transmitted because of errors.
+
+             Discontinuities in the value of this counter can occur
+             at re-initialization of the management system and at
+             other times as indicated by the value of
+             'discontinuity-time'.";
+          reference
+            "RFC 2863: The Interfaces Group MIB - ifOutErrors";
+        }
+      }
+    }
+  }
+}
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-packet-fields@2019-03-04.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-packet-fields@2019-03-04.yang
new file mode 100644
index 0000000000000000000000000000000000000000..2fb797bd87bf4ed825f83ec788df707b94c5f68b
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-packet-fields@2019-03-04.yang
@@ -0,0 +1,576 @@
+module ietf-packet-fields {
+  yang-version 1.1;
+  namespace "urn:ietf:params:xml:ns:yang:ietf-packet-fields";
+  prefix packet-fields;
+
+  import ietf-inet-types {
+    prefix inet;
+    reference
+      "RFC 6991 - Common YANG Data Types.";
+  }
+
+  import ietf-yang-types {
+    prefix yang;
+    reference
+      "RFC 6991 - Common YANG Data Types.";
+  }
+
+  import ietf-ethertypes {
+    prefix eth;
+    reference
+      "RFC 8519 - YANG Data Model for Network Access Control
+                  Lists (ACLs).";
+  }
+
+  organization
+    "IETF NETMOD (Network Modeling) Working Group.";
+
+  contact
+    "WG Web:  <https://datatracker.ietf.org/wg/netmod/>
+     WG List: netmod@ietf.org
+
+     Editor: Mahesh Jethanandani
+             mjethanandani@gmail.com
+     Editor: Lisa Huang
+             huangyi_99@yahoo.com
+     Editor: Sonal Agarwal
+             sagarwal12@gmail.com
+     Editor: Dana Blair
+             dana@blairhome.com";
+
+  description
+    "This YANG module defines groupings that are used by
+     the ietf-access-control-list YANG module.  Their usage
+     is not limited to ietf-access-control-list and can be
+     used anywhere as applicable.
+
+     Copyright (c) 2019 IETF Trust and the persons identified as
+     the document authors.  All rights reserved.
+
+     Redistribution and use in source and binary forms, with or
+     without modification, is permitted pursuant to, and subject
+     to the license terms contained in, the Simplified BSD
+     License set forth in Section 4.c of the IETF Trust's Legal
+     Provisions Relating to IETF Documents
+     (http://trustee.ietf.org/license-info).
+
+     This version of this YANG module is part of RFC 8519; see
+     the RFC itself for full legal notices.";
+
+  revision 2019-03-04 {
+    description
+      "Initial version.";
+    reference
+      "RFC 8519: YANG Data Model for Network Access Control
+                 Lists (ACLs).";
+  }
+
+  /*
+   * Typedefs
+   */
+  typedef operator {
+    type enumeration {
+      enum lte {
+        description
+          "Less than or equal to.";
+      }
+      enum gte {
+        description
+          "Greater than or equal to.";
+      }
+      enum eq {
+        description
+          "Equal to.";
+      }
+      enum neq {
+        description
+          "Not equal to.";
+      }
+    }
+    description
+      "The source and destination port range definitions
+       can be further qualified using an operator.  An
+       operator is needed only if the lower-port is specified
+       and the upper-port is not specified.  The operator
+       therefore further qualifies the lower-port only.";
+  }
+
+  /*
+   * Groupings
+   */
+  grouping port-range-or-operator {
+    choice port-range-or-operator {
+      case range {
+        leaf lower-port {
+          type inet:port-number;
+          must '. <= ../upper-port' {
+            error-message
+              "The lower-port must be less than or equal to
+               the upper-port.";
+          }
+          mandatory true;
+          description
+            "Lower boundary for a port.";
+        }
+        leaf upper-port {
+          type inet:port-number;
+          mandatory true;
+          description
+            "Upper boundary for a port.";
+        }
+      }
+      case operator {
+        leaf operator {
+          type operator;
+          default "eq";
+          description
+            "Operator to be applied on the port below.";
+        }
+        leaf port {
+          type inet:port-number;
+          mandatory true;
+          description
+            "Port number along with the operator on which to
+             match.";
+        }
+      }
+      description
+        "Choice of specifying a port range or a single
+         port along with an operator.";
+    }
+    description
+      "Grouping for port definitions in the form of a
+       choice statement.";
+  }
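+
+  // Editor's example (illustrative, not part of RFC 8519): two uses
+  // of this grouping in JSON encoding; the enclosing node names are
+  // hypothetical.
+  //
+  //   "source-port": { "lower-port": 16384, "upper-port": 32767 }
+  //   "destination-port": { "operator": "gte", "port": 1024 }
+  //
+  // The first matches ports 16384..32767; the second matches any
+  // port greater than or equal to 1024.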
+
+  grouping acl-ip-header-fields {
+    description
+      "IP header fields common to IPv4 and IPv6";
+    reference
+      "RFC 791: Internet Protocol.";
+
+    leaf dscp {
+      type inet:dscp;
+      description
+        "Differentiated Services Code Point.";
+      reference
+        "RFC 2474: Definition of the Differentiated Services
+                   Field (DS Field) in the IPv4 and IPv6
+                   Headers.";
+    }
+
+    leaf ecn {
+      type uint8 {
+        range "0..3";
+      }
+      description
+        "Explicit Congestion Notification.";
+      reference
+        "RFC 3168: The Addition of Explicit Congestion
+                   Notification (ECN) to IP.";
+    }
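+
+    // Editor's note (illustrative, not part of RFC 8519): the two
+    // ECN bits carry the RFC 3168 codepoints 0 (Not-ECT), 1 (ECT(1)),
+    // 2 (ECT(0)), and 3 (CE), hence the range 0..3.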
+
+    leaf length {
+      type uint16;
+      description
+        "In the IPv4 header field, this field is known as the Total
+         Length.  Total Length is the length of the datagram, measured
+         in octets, including internet header and data.
+
+         In the IPv6 header field, this field is known as the Payload
+         Length, which is the length of the IPv6 payload, i.e., the rest
+         of the packet following the IPv6 header, in octets.";
+      reference
+        "RFC 791: Internet Protocol
+         RFC 8200: Internet Protocol, Version 6 (IPv6) Specification.";
+    }
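+
+    // Editor's note (illustrative, not part of RFC 8519): the two
+    // meanings differ by the header itself; a datagram carrying 1240
+    // payload octets has Total Length 1260 in IPv4 (20-octet header,
+    // no options) but Payload Length 1240 in IPv6.
+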
+    leaf ttl {
+      type uint8;
+      description
+        "This field indicates the maximum time the datagram is allowed
+         to remain in the internet system.  If this field contains the
+         value zero, then the datagram must be dropped.
+
+         In IPv6, this field is known as the Hop Limit.";
+      reference
+        "RFC 791: Internet Protocol
+         RFC 8200: Internet Protocol, Version 6 (IPv6) Specification.";
+    }
+    leaf protocol {
+      type uint8;
+      description
+        "Internet Protocol number.  Refers to the protocol of the
+         payload.  In IPv6, this field is known as 'next-header',
+         and if extension headers are present, the protocol is
+         present in the 'upper-layer' header.";
+      reference
+        "RFC 791: Internet Protocol
+         RFC 8200: Internet Protocol, Version 6 (IPv6) Specification.";
+    }
+  }
+
+  grouping acl-ipv4-header-fields {
+    description
+      "Fields in the IPv4 header.";
+    leaf ihl {
+      type uint8 {
+        range "5..60";
+      }
+      description
+        "In an IPv4 header field, the Internet Header Length (IHL) is
+         the length of the internet header in 32-bit words and
+         thus points to the beginning of the data.  Note that the
+         minimum value for a correct header is 5.";
+    }
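+
+    // Editor's note (illustrative, not part of RFC 8519): IHL counts
+    // 32-bit words, so the minimum value 5 corresponds to the
+    // 20-octet option-less IPv4 header; the 4-bit on-the-wire field
+    // can express at most 15 words, i.e., 60 octets.
+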
+    leaf flags {
+      type bits {
+        bit reserved {
+          position 0;
+          description
+            "Reserved.  Must be zero.";
+        }
+        bit fragment {
+          position 1;
+          description
+            "Setting the value to 0 indicates may fragment, while
+             setting the value to 1 indicates do not fragment.";
+        }
+        bit more {
+          position 2;
+          description
+            "Setting the value to 0 indicates this is the last fragment,
+             and setting the value to 1 indicates more fragments are
+             coming.";
+        }
+      }
+      description
+        "Bit definitions for the Flags field in the IPv4 header.";
+    }
+    leaf offset {
+      type uint16 {
+        range "20..65535";
+      }
+      description
+        "The fragment offset is measured in units of 8 octets (64 bits).
+         The first fragment has offset zero.  The offset field is
+         13 bits wide.";
+    }
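+
+    // Editor's note (illustrative, not part of RFC 8519): an offset
+    // value of n places the fragment at byte n * 8 of the original
+    // datagram; e.g., offset 185 corresponds to byte offset 1480.
+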
+    leaf identification {
+      type uint16;
+      description
+        "An identifying value assigned by the sender to aid in
+         assembling the fragments of a datagram.";
+    }
+
+    choice destination-network {
+      case destination-ipv4-network {
+        leaf destination-ipv4-network {
+          type inet:ipv4-prefix;
+          description
+            "Destination IPv4 address prefix.";
+        }
+      }
+      description
+        "Choice of specifying a destination IPv4 address or
+         referring to a group of IPv4 destination addresses.";
+    }
+
+    choice source-network {
+      case source-ipv4-network {
+        leaf source-ipv4-network {
+          type inet:ipv4-prefix;
+          description
+            "Source IPv4 address prefix.";
+        }
+      }
+      description
+        "Choice of specifying a source IPv4 address or
+         referring to a group of IPv4 source addresses.";
+    }
+  }
+
+  grouping acl-ipv6-header-fields {
+    description
+      "Fields in the IPv6 header.";
+
+    choice destination-network {
+      case destination-ipv6-network {
+        leaf destination-ipv6-network {
+          type inet:ipv6-prefix;
+          description
+            "Destination IPv6 address prefix.";
+        }
+      }
+      description
+        "Choice of specifying a destination IPv6 address
+         or referring to a group of IPv6 destination
+         addresses.";
+    }
+
+    choice source-network {
+      case source-ipv6-network {
+        leaf source-ipv6-network {
+          type inet:ipv6-prefix;
+          description
+            "Source IPv6 address prefix.";
+        }
+      }
+      description
+        "Choice of specifying a source IPv6 address or
+         referring to a group of IPv6 source addresses.";
+    }
+
+    leaf flow-label {
+      type inet:ipv6-flow-label;
+      description
+        "IPv6 Flow label.";
+    }
+    reference
+      "RFC 4291: IP Version 6 Addressing Architecture
+       RFC 4007: IPv6 Scoped Address Architecture
+       RFC 5952: A Recommendation for IPv6 Address Text
+                 Representation.";
+  }
+
+  grouping acl-eth-header-fields {
+    description
+      "Fields in the Ethernet header.";
+    leaf destination-mac-address {
+      type yang:mac-address;
+      description
+        "Destination IEEE 802 Media Access Control (MAC)
+         address.";
+    }
+    leaf destination-mac-address-mask {
+      type yang:mac-address;
+      description
+        "Destination IEEE 802 MAC address mask.";
+    }
+    leaf source-mac-address {
+      type yang:mac-address;
+      description
+        "Source IEEE 802 MAC address.";
+    }
+    leaf source-mac-address-mask {
+      type yang:mac-address;
+      description
+        "Source IEEE 802 MAC address mask.";
+    }
+    leaf ethertype {
+      type eth:ethertype;
+      description
+        "The Ethernet Type (or Length) value represented
+         in the canonical order defined by IEEE 802.
+         The canonical representation uses lowercase
+         characters.";
+      reference
+        "IEEE 802-2014, Clause 9.2.";
+    }
+    reference
+      "IEEE 802: IEEE Standard for Local and Metropolitan
+       Area Networks: Overview and Architecture.";
+  }
+
+  grouping acl-tcp-header-fields {
+    description
+      "Collection of TCP header fields that can be used to
+       set up a match filter.";
+    leaf sequence-number {
+      type uint32;
+      description
+        "Sequence number that appears in the packet.";
+    }
+    leaf acknowledgement-number {
+      type uint32;
+      description
+        "The acknowledgement number that appears in the
+         packet.";
+    }
+    leaf data-offset {
+      type uint8 {
+        range "5..15";
+      }
+      description
+        "Specifies the size of the TCP header in 32-bit
+         words.  The minimum size header is 5 words and
+         the maximum is 15 words; thus, this gives a
+         minimum size of 20 bytes and a maximum of 60
+         bytes, allowing for up to 40 bytes of options
+         in the header.";
+    }
+    leaf reserved {
+      type uint8;
+      description
+        "Reserved for future use.";
+    }
+    leaf flags {
+      type bits {
+        bit cwr {
+          position 1;
+          description
+            "The Congestion Window Reduced (CWR) flag is set
+             by the sending host to indicate that it received
+             a TCP segment with the ECN-Echo (ECE) flag set
+             and had responded in the congestion control
+             mechanism.";
+          reference
+            "RFC 3168: The Addition of Explicit Congestion
+                       Notification (ECN) to IP.";
+        }
+        bit ece {
+          position 2;
+          description
+            "ECN-Echo has a dual role, depending on the value
+             of the SYN flag.  It indicates the following: if
+             the SYN flag is set (1), the TCP peer is ECN
+             capable, and if the SYN flag is clear (0), a packet
+             with the Congestion Experienced flag set (ECN=11)
+             in the IP header was received during normal
+             transmission (added to the header by RFC 3168).
+             This serves as an indication of network congestion
+             (or impending congestion) to the TCP sender.";
+          reference
+            "RFC 3168: The Addition of Explicit Congestion
+                       Notification (ECN) to IP.";
+        }
+        bit urg {
+          position 3;
+          description
+            "Indicates that the Urgent Pointer field is significant.";
+        }
+        bit ack {
+          position 4;
+          description
+            "Indicates that the Acknowledgement field is significant.
+             All packets after the initial SYN packet sent by the
+             client should have this flag set.";
+        }
+        bit psh {
+          position 5;
+          description
+            "Push function.  Asks to push the buffered data to the
+             receiving application.";
+        }
+        bit rst {
+          position 6;
+          description
+            "Reset the connection.";
+        }
+        bit syn {
+          position 7;
+          description
+            "Synchronize sequence numbers.  Only the first packet
+             sent from each end should have this flag set.  Some
+             other flags and fields change meaning based on this
+             flag, and some are only valid for when it is set,
+             and others when it is clear.";
+        }
+        bit fin {
+          position 8;
+          description
+            "Last package from the sender.";
+        }
+      }
+      description
+        "Also known as Control Bits.  Contains nine 1-bit flags.";
+      reference
+        "RFC 793: Transmission Control Protocol.";
+    }
+    leaf window-size {
+      type uint16;
+      units "bytes";
+      description
+        "The size of the receive window, which specifies
+         the number of window size units beyond the segment
+         identified by the sequence number in the Acknowledgement
+         field that the sender of this segment is currently
+         willing to receive.";
+    }
+    leaf urgent-pointer {
+      type uint16;
+      description
+        "This field is an offset from the sequence number
+         indicating the last urgent data byte.";
+    }
+    leaf options {
+      type binary {
+        length "1..40";
+      }
+      description
+        "The length of this field is determined by the
+         Data Offset field.  Options have up to three
+         fields: Option-Kind (1 byte), Option-Length
+         (1 byte), and Option-Data (variable).  The Option-Kind
+         field indicates the type of option and is the
+         only field that is not optional.  Depending on
+         what kind of option we are dealing with,
+         the next two fields may be set: the Option-Length
+         field indicates the total length of the option,
+         and the Option-Data field contains the value of
+         the option, if applicable.";
+    }
+  }
+
+  grouping acl-udp-header-fields {
+    description
+      "Collection of UDP header fields that can be used
+       to set up a match filter.";
+    leaf length {
+      type uint16;
+      description
+        "A field that specifies the length in bytes of
+         the UDP header and UDP data.  The minimum
+         length is 8 bytes because that is the length of
+         the header.  The field size sets a theoretical
+         limit of 65,535 bytes (8-byte header plus 65,527
+         bytes of data) for a UDP datagram.  However, the
+         actual limit for the data length, which is
+         imposed by the underlying IPv4 protocol, is
+         65,507 bytes (65,535 minus 8-byte UDP header
+         minus 20-byte IP header).
+
+         In IPv6 jumbograms, it is possible to have
+         UDP packets of a size greater than 65,535 bytes.
+         RFC 2675 specifies that the Length field is set
+         to zero if the length of the UDP header plus
+         UDP data is greater than 65,535.";
+    }
+  }
+
+  grouping acl-icmp-header-fields {
+    description
+      "Collection of ICMP header fields that can be
+       used to set up a match filter.";
+    leaf type {
+      type uint8;
+      description
+        "Also known as control messages.";
+      reference
+        "RFC 792: Internet Control Message Protocol
+         RFC 4443: Internet Control Message Protocol (ICMPv6)
+                   for Internet Protocol Version 6 (IPv6)
+                   Specification.";
+    }
+    leaf code {
+      type uint8;
+      description
+        "ICMP subtype.  Also known as control messages.";
+      reference
+        "RFC 792: Internet Control Message Protocol
+         RFC 4443: Internet Control Message Protocol (ICMPv6)
+                   for Internet Protocol Version 6 (IPv6)
+                   Specification.";
+    }
+    leaf rest-of-header {
+      type binary;
+      description
+        "Unbounded in length, the contents vary based on the
+         ICMP type and code.  Also referred to as 'Message Body'
+         in ICMPv6.";
+      reference
+        "RFC 792: Internet Control Message Protocol
+         RFC 4443: Internet Control Message Protocol (ICMPv6)
+                   for Internet Protocol Version 6 (IPv6)
+                   Specification.";
+    }
+  }
+}
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-yang-types@2013-07-15.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-yang-types@2013-07-15.yang
new file mode 100644
index 0000000000000000000000000000000000000000..956562a7b342055127961732d8bde4be21c80d7d
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_acl/yang/ietf-yang-types@2013-07-15.yang
@@ -0,0 +1,475 @@
+   module ietf-yang-types {
+
+     namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types";
+     prefix "yang";
+
+     organization
+      "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+     contact
+      "WG Web:   <http://tools.ietf.org/wg/netmod/>
+       WG List:  <mailto:netmod@ietf.org>
+
+       WG Chair: David Kessens
+                 <mailto:david.kessens@nsn.com>
+
+       WG Chair: Juergen Schoenwaelder
+                 <mailto:j.schoenwaelder@jacobs-university.de>
+
+       Editor:   Juergen Schoenwaelder
+                 <mailto:j.schoenwaelder@jacobs-university.de>";
+
+     description
+      "This module contains a collection of generally useful derived
+       YANG data types.
+
+       Copyright (c) 2013 IETF Trust and the persons identified as
+       authors of the code.  All rights reserved.
+
+       Redistribution and use in source and binary forms, with or
+       without modification, is permitted pursuant to, and subject
+       to the license terms contained in, the Simplified BSD License
+       set forth in Section 4.c of the IETF Trust's Legal Provisions
+       Relating to IETF Documents
+       (http://trustee.ietf.org/license-info).
+
+       This version of this YANG module is part of RFC 6991; see
+       the RFC itself for full legal notices.";
+
+     revision 2013-07-15 {
+       description
+        "This revision adds the following new data types:
+         - yang-identifier
+         - hex-string
+         - uuid
+         - dotted-quad";
+       reference
+        "RFC 6991: Common YANG Data Types";
+     }
+
+     revision 2010-09-24 {
+       description
+        "Initial revision.";
+       reference
+        "RFC 6021: Common YANG Data Types";
+     }
+
+     /*** collection of counter and gauge types ***/
+
+     typedef counter32 {
+       type uint32;
+       description
+        "The counter32 type represents a non-negative integer
+         that monotonically increases until it reaches a
+         maximum value of 2^32-1 (4294967295 decimal), when it
+         wraps around and starts increasing again from zero.
+
+         Counters have no defined 'initial' value, and thus, a
+         single value of a counter has (in general) no information
+         content.  Discontinuities in the monotonically increasing
+         value normally occur at re-initialization of the
+         management system, and at other times as specified in the
+         description of a schema node using this type.  If such
+         other times can occur, for example, the creation of
+         a schema node of type counter32 at times other than
+         re-initialization, then a corresponding schema node
+         should be defined, with an appropriate type, to indicate
+         the last discontinuity.
+
+         The counter32 type should not be used for configuration
+         schema nodes.  A default statement SHOULD NOT be used in
+         combination with the type counter32.
+
+         In the value set and its semantics, this type is equivalent
+         to the Counter32 type of the SMIv2.";
+       reference
+        "RFC 2578: Structure of Management Information Version 2
+                   (SMIv2)";
+     }
+
+     typedef zero-based-counter32 {
+       type yang:counter32;
+       default "0";
+       description
+        "The zero-based-counter32 type represents a counter32
+         that has the defined 'initial' value zero.
+
+         A schema node of this type will be set to zero (0) on creation
+         and will thereafter increase monotonically until it reaches
+         a maximum value of 2^32-1 (4294967295 decimal), when it
+         wraps around and starts increasing again from zero.
+
+         Provided that an application discovers a new schema node
+         of this type within the minimum time to wrap, it can use the
+         'initial' value as a delta.  It is important for a management
+         station to be aware of this minimum time and the actual time
+         between polls, and to discard data if the actual time is too
+         long or there is no defined minimum time.
+
+         In the value set and its semantics, this type is equivalent
+         to the ZeroBasedCounter32 textual convention of the SMIv2.";
+       reference
+         "RFC 4502: Remote Network Monitoring Management Information
+                    Base Version 2";
+     }
+
+     typedef counter64 {
+       type uint64;
+       description
+        "The counter64 type represents a non-negative integer
+         that monotonically increases until it reaches a
+         maximum value of 2^64-1 (18446744073709551615 decimal),
+         when it wraps around and starts increasing again from zero.
+
+         Counters have no defined 'initial' value, and thus, a
+         single value of a counter has (in general) no information
+         content.  Discontinuities in the monotonically increasing
+         value normally occur at re-initialization of the
+         management system, and at other times as specified in the
+         description of a schema node using this type.  If such
+         other times can occur, for example, the creation of
+         a schema node of type counter64 at times other than
+         re-initialization, then a corresponding schema node
+         should be defined, with an appropriate type, to indicate
+         the last discontinuity.
+
+         The counter64 type should not be used for configuration
+         schema nodes.  A default statement SHOULD NOT be used in
+         combination with the type counter64.
+
+         In the value set and its semantics, this type is equivalent
+         to the Counter64 type of the SMIv2.";
+       reference
+        "RFC 2578: Structure of Management Information Version 2
+                   (SMIv2)";
+     }
+
+     typedef zero-based-counter64 {
+       type yang:counter64;
+       default "0";
+       description
+        "The zero-based-counter64 type represents a counter64 that
+         has the defined 'initial' value zero.
+
+         A schema node of this type will be set to zero (0) on creation
+         and will thereafter increase monotonically until it reaches
+         a maximum value of 2^64-1 (18446744073709551615 decimal),
+         when it wraps around and starts increasing again from zero.
+
+         Provided that an application discovers a new schema node
+         of this type within the minimum time to wrap, it can use the
+         'initial' value as a delta.  It is important for a management
+         station to be aware of this minimum time and the actual time
+         between polls, and to discard data if the actual time is too
+         long or there is no defined minimum time.
+
+         In the value set and its semantics, this type is equivalent
+         to the ZeroBasedCounter64 textual convention of the SMIv2.";
+       reference
+        "RFC 2856: Textual Conventions for Additional High Capacity
+                   Data Types";
+     }
+
+     typedef gauge32 {
+       type uint32;
+       description
+        "The gauge32 type represents a non-negative integer, which
+         may increase or decrease, but shall never exceed a maximum
+         value, nor fall below a minimum value.  The maximum value
+         cannot be greater than 2^32-1 (4294967295 decimal), and
+         the minimum value cannot be smaller than 0.  The value of
+         a gauge32 has its maximum value whenever the information
+         being modeled is greater than or equal to its maximum
+         value, and has its minimum value whenever the information
+         being modeled is smaller than or equal to its minimum value.
+         If the information being modeled subsequently decreases
+         below (increases above) the maximum (minimum) value, the
+         gauge32 also decreases (increases).
+
+         In the value set and its semantics, this type is equivalent
+         to the Gauge32 type of the SMIv2.";
+       reference
+        "RFC 2578: Structure of Management Information Version 2
+                   (SMIv2)";
+     }
+
+     typedef gauge64 {
+       type uint64;
+       description
+        "The gauge64 type represents a non-negative integer, which
+         may increase or decrease, but shall never exceed a maximum
+         value, nor fall below a minimum value.  The maximum value
+         cannot be greater than 2^64-1 (18446744073709551615), and
+         the minimum value cannot be smaller than 0.  The value of
+         a gauge64 has its maximum value whenever the information
+         being modeled is greater than or equal to its maximum
+         value, and has its minimum value whenever the information
+         being modeled is smaller than or equal to its minimum value.
+         If the information being modeled subsequently decreases
+         below (increases above) the maximum (minimum) value, the
+         gauge64 also decreases (increases).
+
+         In the value set and its semantics, this type is equivalent
+         to the CounterBasedGauge64 SMIv2 textual convention defined
+         in RFC 2856";
+       reference
+        "RFC 2856: Textual Conventions for Additional High Capacity
+                   Data Types";
+     }
+
+     /*** collection of identifier-related types ***/
+
+     typedef object-identifier {
+       type string {
+         pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))'
+               + '(\.(0|([1-9]\d*)))*';
+       }
+       description
+        "The object-identifier type represents administratively
+         assigned names in a registration-hierarchical-name tree.
+
+         Values of this type are denoted as a sequence of numerical
+         non-negative sub-identifier values.  Each sub-identifier
+         value MUST NOT exceed 2^32-1 (4294967295).  Sub-identifiers
+         are separated by single dots and without any intermediate
+         whitespace.
+
+         The ASN.1 standard restricts the value space of the first
+         sub-identifier to 0, 1, or 2.  Furthermore, the value space
+         of the second sub-identifier is restricted to the range
+         0 to 39 if the first sub-identifier is 0 or 1.  Finally,
+         the ASN.1 standard requires that an object identifier
+         has always at least two sub-identifiers.  The pattern
+         captures these restrictions.
+
+         Although the number of sub-identifiers is not limited,
+         module designers should realize that there may be
+         implementations that stick with the SMIv2 limit of 128
+         sub-identifiers.
+
+         This type is a superset of the SMIv2 OBJECT IDENTIFIER type
+         since it is not restricted to 128 sub-identifiers.  Hence,
+         this type SHOULD NOT be used to represent the SMIv2 OBJECT
+         IDENTIFIER type; the object-identifier-128 type SHOULD be
+         used instead.";
+       reference
+        "ISO9834-1: Information technology -- Open Systems
+         Interconnection -- Procedures for the operation of OSI
+         Registration Authorities: General procedures and top
+         arcs of the ASN.1 Object Identifier tree";
+     }
+
+     typedef object-identifier-128 {
+       type object-identifier {
+         pattern '\d*(\.\d*){1,127}';
+       }
+       description
+        "This type represents object-identifiers restricted to 128
+         sub-identifiers.
+
+         In the value set and its semantics, this type is equivalent
+         to the OBJECT IDENTIFIER type of the SMIv2.";
+       reference
+        "RFC 2578: Structure of Management Information Version 2
+                   (SMIv2)";
+     }
+
+     typedef yang-identifier {
+       type string {
+         length "1..max";
+         pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*';
+         pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*';
+       }
+       description
+         "A YANG identifier string as defined by the 'identifier'
+          rule in Section 12 of RFC 6020.  An identifier must
+          start with an alphabetic character or an underscore
+          followed by an arbitrary sequence of alphabetic or
+          numeric characters, underscores, hyphens, or dots.
+
+          A YANG identifier MUST NOT start with any possible
+          combination of the lowercase or uppercase character
+          sequence 'xml'.";
+       reference
+         "RFC 6020: YANG - A Data Modeling Language for the Network
+                    Configuration Protocol (NETCONF)";
+     }
+
+     /*** collection of types related to date and time***/
+
+     typedef date-and-time {
+       type string {
+         pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?'
+               + '(Z|[\+\-]\d{2}:\d{2})';
+       }
+       description
+        "The date-and-time type is a profile of the ISO 8601
+         standard for representation of dates and times using the
+         Gregorian calendar.  The profile is defined by the
+         date-time production in Section 5.6 of RFC 3339.
+
+         The date-and-time type is compatible with the dateTime XML
+         schema type with the following notable exceptions:
+
+         (a) The date-and-time type does not allow negative years.
+
+         (b) The date-and-time time-offset -00:00 indicates an unknown
+             time zone (see RFC 3339) while -00:00 and +00:00 and Z
+             all represent the same time zone in dateTime.
+
+         (c) The canonical format (see below) of data-and-time values
+             differs from the canonical format used by the dateTime XML
+             schema type, which requires all times to be in UTC using
+             the time-offset 'Z'.
+
+         This type is not equivalent to the DateAndTime textual
+         convention of the SMIv2 since RFC 3339 uses a different
+         separator between full-date and full-time and provides
+         higher resolution of time-secfrac.
+
+         The canonical format for date-and-time values with a known time
+         zone uses a numeric time zone offset that is calculated using
+         the device's configured known offset to UTC time.  A change of
+         the device's offset to UTC time will cause date-and-time values
+         to change accordingly.  Such changes might happen periodically
+         in case a server follows automatically daylight saving time
+         (DST) time zone offset changes.  The canonical format for
+         date-and-time values with an unknown time zone (usually
+         referring to the notion of local time) uses the time-offset
+         -00:00.";
+       reference
+        "RFC 3339: Date and Time on the Internet: Timestamps
+         RFC 2579: Textual Conventions for SMIv2
+         XSD-TYPES: XML Schema Part 2: Datatypes Second Edition";
+     }
+
+     typedef timeticks {
+       type uint32;
+       description
+        "The timeticks type represents a non-negative integer that
+         represents the time, modulo 2^32 (4294967296 decimal), in
+         hundredths of a second between two epochs.  When a schema
+         node is defined that uses this type, the description of
+         the schema node identifies both of the reference epochs.
+
+         In the value set and its semantics, this type is equivalent
+         to the TimeTicks type of the SMIv2.";
+       reference
+        "RFC 2578: Structure of Management Information Version 2
+                   (SMIv2)";
+     }
+
+     typedef timestamp {
+       type yang:timeticks;
+       description
+        "The timestamp type represents the value of an associated
+         timeticks schema node at which a specific occurrence
+         happened.  The specific occurrence must be defined in the
+         description of any schema node defined using this type.  When
+         the specific occurrence occurred prior to the last time the
+         associated timeticks attribute was zero, then the timestamp
+         value is zero.  Note that this requires all timestamp values
+         to be reset to zero when the value of the associated timeticks
+         attribute reaches 497+ days and wraps around to zero.
+
+         The associated timeticks schema node must be specified
+         in the description of any schema node using this type.
+
+         In the value set and its semantics, this type is equivalent
+         to the TimeStamp textual convention of the SMIv2.";
+       reference
+        "RFC 2579: Textual Conventions for SMIv2";
+     }
+
+     /*** collection of generic address types ***/
+
+     typedef phys-address {
+       type string {
+         pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?';
+       }
+
+       description
+        "Represents media- or physical-level addresses represented
+         as a sequence octets, each octet represented by two hexadecimal
+         numbers.  Octets are separated by colons.  The canonical
+         representation uses lowercase characters.
+
+         In the value set and its semantics, this type is equivalent
+         to the PhysAddress textual convention of the SMIv2.";
+       reference
+        "RFC 2579: Textual Conventions for SMIv2";
+     }
+
+     typedef mac-address {
+       type string {
+         pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}';
+       }
+       description
+        "The mac-address type represents an IEEE 802 MAC address.
+         The canonical representation uses lowercase characters.
+
+         In the value set and its semantics, this type is equivalent
+         to the MacAddress textual convention of the SMIv2.";
+       reference
+        "IEEE 802: IEEE Standard for Local and Metropolitan Area
+                   Networks: Overview and Architecture
+         RFC 2579: Textual Conventions for SMIv2";
+     }
+
+     /*** collection of XML-specific types ***/
+
+     typedef xpath1.0 {
+       type string;
+       description
+        "This type represents an XPATH 1.0 expression.
+
+         When a schema node is defined that uses this type, the
+         description of the schema node MUST specify the XPath
+         context in which the XPath expression is evaluated.";
+       reference
+        "XPATH: XML Path Language (XPath) Version 1.0";
+     }
+
+     /*** collection of string types ***/
+
+     typedef hex-string {
+       type string {
+         pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?';
+       }
+
+       description
+        "A hexadecimal string with octets represented as hex digits
+         separated by colons.  The canonical representation uses
+         lowercase characters.";
+     }
+
+     typedef uuid {
+       type string {
+         pattern '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-'
+               + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12}';
+       }
+       description
+        "A Universally Unique IDentifier in the string representation
+         defined in RFC 4122.  The canonical representation uses
+         lowercase characters.
+
+         The following is an example of a UUID in string representation:
+         f81d4fae-7dec-11d0-a765-00a0c91e6bf6
+         ";
+       reference
+        "RFC 4122: A Universally Unique IDentifier (UUID) URN
+                   Namespace";
+     }
+
+     typedef dotted-quad {
+       type string {
+         pattern
+           '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+         + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])';
+       }
+       description
+         "An unsigned 32-bit number expressed in the dotted-quad
+          notation, i.e., four octets written as decimal numbers
+          and separated with the '.' (full stop) character.";
+     }
+   }
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/Hardware.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/Hardware.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7404b924a44e9125dbf84bdcdfab3b9af790e5d
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/Hardware.py
@@ -0,0 +1,53 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from flask import request
+from flask.json import jsonify
+from flask_restful import Resource
+from common.tools.context_queries.Device import get_device
+from context.client.ContextClient import ContextClient
+from ..tools.Authentication import HTTP_AUTH
+from ..tools.HttpStatusCodes import HTTP_OK, HTTP_SERVERERROR
+from .YangHandler import YangHandler
+
+LOGGER = logging.getLogger(__name__)
+
+class Hardware(Resource):
+    @HTTP_AUTH.login_required
+    def get(self, device_uuid : str):
+        LOGGER.debug('Device UUID: {:s}'.format(str(device_uuid)))
+        LOGGER.debug('Request: {:s}'.format(str(request)))
+
+        try:
+            context_client = ContextClient()
+            device = get_device(
+                context_client, device_uuid, rw_copy=False,
+                include_endpoints=False, include_config_rules=False, include_components=True
+            )
+            if device is None:
+                raise Exception('Device({:s}) not found in database'.format(str(device_uuid)))
+
+            yang_handler = YangHandler()
+            hardware_reply = yang_handler.compose(device)
+            yang_handler.destroy()
+
+            response = jsonify(hardware_reply)
+            response.status_code = HTTP_OK
+        except Exception as e: # pylint: disable=broad-except
+            MSG = 'Something went wrong retrieving Hardware of Device({:s})'
+            LOGGER.exception(MSG.format(str(device_uuid)))
+            response = jsonify({'error': str(e)})
+            response.status_code = HTTP_SERVERERROR
+        return response
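+
+# Example client call (an illustrative sketch; host, port, and credentials are
+# assumptions that depend on the actual TFS deployment):
+#   import requests
+#   reply = requests.get(
+#       'http://<nbi-host>/restconf/data/device=<device-uuid>/ietf-hardware:hardware',
+#       auth=('admin', 'admin'))
+#   reply.raise_for_status()
+#   print(reply.json())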
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/YangHandler.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/YangHandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..4534294857e0c7972f4764927fe6d23bc656ad5f
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/YangHandler.py
@@ -0,0 +1,132 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import logging
+import os
+import re
+import libyang
+from typing import Dict, Optional
+from common.proto.context_pb2 import Device
+
+LOGGER = logging.getLogger(__name__)
+YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang')
+YANG_MODULES = [
+    'iana-hardware',
+    'ietf-hardware'
+]
+
+class YangHandler:
+    def __init__(self) -> None:
+        self._yang_context = libyang.Context(YANG_DIR)
+        for yang_module_name in YANG_MODULES:
+            LOGGER.info('Loading module: {:s}'.format(str(yang_module_name)))
+            self._yang_context.load_module(yang_module_name).feature_enable_all()
+
+    def parse_to_dict(self, message : Dict) -> Dict:
+        yang_module = self._yang_context.get_module('ietf-hardware')
+        dnode : Optional[libyang.DNode] = yang_module.parse_data_dict(
+            message, validate_present=True, validate=True, strict=True
+        )
+        if dnode is None: raise Exception('Unable to parse Message({:s})'.format(str(message)))
+        message = dnode.print_dict()
+        dnode.free()
+        return message
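+
+    # Usage (illustrative): given data already shaped as ietf-hardware content,
+    #   handler.parse_to_dict({'ietf-hardware:hardware': {'component': [...]}})
+    # validates it against the loaded modules and returns it as a plain dict.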
+
+    @staticmethod
+    def convert_to_iso_date(date_str: str) -> Optional[str]:
+        date_str = date_str.strip('"')
+        # Define the regex pattern for ISO 8601 date format
+        pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|[\+\-]\d{2}:\d{2})"
+        # Check if the input date string matches the pattern
+        if re.match(pattern, date_str):
+            return date_str  # Already in ISO format
+        else:
+            try:
+                # Parse the input date string as a datetime object
+                datetime_obj = datetime.datetime.strptime(date_str, "%Y-%m-%d")
+                # Convert to ISO format
+                iso_date = datetime_obj.isoformat() + "Z"
+                return iso_date
+            except ValueError:
+                return None  # Invalid date format
+
+
+    def compose(self, device : Device) -> Dict:
+        # Compose the ietf-hardware data tree by iterating over the device components.
+        hardware = self._yang_context.create_data_path('/ietf-hardware:hardware')
+        physical_index = 1
+
+        for component in device.components:
+            attributes = component.attributes
+
+            component_new = hardware.create_path('component[name="{:s}"]'.format(component.name))
+            component_new.create_path('name', component.name)
+
+            # Map special component classes to their iana-hardware identities and
+            # flag field-replaceable units (FRUs).
+            component_type = component.type
+            if component_type == "TRANSCEIVER":
+                component_type = "module"
+
+            if component_type == "FRU":
+                component_type = "stack"
+                component_new.create_path('is-fru', True)
+            else:
+                component_new.create_path('is-fru', False)
+
+            component_type = component_type.replace("_", "-").lower()
+            component_type = 'iana-hardware:' + component_type
+
+            component_new.create_path('class', component_type)
+
+            # Add the remaining IETF hardware attributes.
+
+            component_new.create_path('physical-index', physical_index)
+            physical_index += 1
+
+            component_new.create_path('description', attributes["description"])
+
+            component_new.create_path('parent', component.parent)
+
+            if attributes["mfg-date"] != "":
+                mfg_date = self.convert_to_iso_date(attributes["mfg-date"])
+                LOGGER.info('component[name="{:s}"]'.format(attributes["mfg-date"]))
+                component_new.create_path('mfg-date', mfg_date)
+
+            component_new.create_path('hardware-rev', attributes["hardware-rev"])
+            component_new.create_path('software-rev', attributes["software-rev"])
+            component_new.create_path('firmware-rev', attributes["firmware-version"])
+            component_new.create_path('serial-num', attributes["serial-num"])
+            component_new.create_path('mfg-name', attributes["mfg-name"])
+            if attributes["id"]:
+                component_new.create_path('parent-rel-pos', attributes["id"])
+            
+            component_new.create_path('uri', component.name)
+   
+
+            component_new.create_path('uuid', component.component_uuid.uuid)
+
+            contains_child = []
+            for component2 in device.components:
+                if component.name == component2.parent:
+                    contains_child.append(component2.name)
+
+            component_new.create_path('contains-child', contains_child)
+
+        return json.loads(hardware.print_mem('json'))
+
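+    # Illustrative shape of the value returned by compose() (a sketch derived from
+    # the paths created above, not captured from a real device):
+    #   {'ietf-hardware:hardware': {'component': [
+    #       {'name': 'chassis-1', 'class': 'iana-hardware:chassis',
+    #        'is-fru': False, 'physical-index': 1, 'contains-child': [...]}]}}
+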
+    def destroy(self) -> None:
+        self._yang_context.destroy()
\ No newline at end of file
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/__init__.py b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a0cedcc4df9a198621e2585bafef1768ad1f8ca
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from nbi.service.rest_server.nbi_plugins.ietf_hardware.Hardware import Hardware
+from nbi.service.rest_server.RestServer import RestServer
+
+URL_PREFIX = "/restconf/data/device=<path:device_uuid>/ietf-hardware:hardware"
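+# For example (illustrative), a device with UUID 'dev-1' is served at:
+#   GET /restconf/data/device=dev-1/ietf-hardware:hardware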
+
+def register_ietf_hardware(rest_server: RestServer):
+    rest_server.add_resource(Hardware, URL_PREFIX)
\ No newline at end of file
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/iana-hardware@2018-03-13.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/iana-hardware@2018-03-13.yang
new file mode 100644
index 0000000000000000000000000000000000000000..5cd52648ff9b676dc04a98b5b85bd180f88f8a6e
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/iana-hardware@2018-03-13.yang
@@ -0,0 +1,189 @@
+module iana-hardware {
+  yang-version 1.1;
+  namespace "urn:ietf:params:xml:ns:yang:iana-hardware";
+  prefix ianahw;
+
+  organization "IANA";
+  contact
+    "        Internet Assigned Numbers Authority
+
+     Postal: ICANN
+             12025 Waterfront Drive, Suite 300
+             Los Angeles, CA  90094-2536
+             United States of America
+
+     Tel:    +1 310 301 5800
+     E-Mail: iana@iana.org>";
+
+  description
+    "IANA-defined identities for hardware class.
+
+     The latest revision of this YANG module can be obtained from
+     the IANA website.
+
+     Requests for new values should be made to IANA via
+     email (iana@iana.org).
+
+     Copyright (c) 2018 IETF Trust and the persons identified as
+     authors of the code.  All rights reserved.
+
+     Redistribution and use in source and binary forms, with or
+     without modification, is permitted pursuant to, and subject
+     to the license terms contained in, the Simplified BSD License
+
+     set forth in Section 4.c of the IETF Trust's Legal Provisions
+     Relating to IETF Documents
+     (https://trustee.ietf.org/license-info).
+
+     The initial version of this YANG module is part of RFC 8348;
+     see the RFC itself for full legal notices.";
+  reference
+    "https://www.iana.org/assignments/yang-parameters";
+
+  revision 2018-03-13 {
+    description
+      "Initial revision.";
+    reference
+      "RFC 8348: A YANG Data Model for Hardware Management";
+  }
+
+  /*
+   * Identities
+   */
+
+  identity hardware-class {
+    description
+      "This identity is the base for all hardware class
+       identifiers.";
+  }
+
+  identity unknown {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is unknown
+       to the server.";
+  }
+
+  identity chassis {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is an
+       overall container for networking equipment.  Any class of
+       physical component, except a stack, may be contained within a
+       chassis; a chassis may only be contained within a stack.";
+  }
+
+  identity backplane {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is some sort
+       of device for aggregating and forwarding networking traffic,
+       such as a shared backplane in a modular ethernet switch.  Note
+       that an implementation may model a backplane as a single
+       physical component, which is actually implemented as multiple
+       discrete physical components (within a chassis or stack).";
+  }
+
+  identity container {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is capable
+       of containing one or more removable physical entities,
+       possibly of different types.  For example, each (empty or
+       full) slot in a chassis will be modeled as a container.  Note
+       that all removable physical components should be modeled
+       within a container component, such as field-replaceable
+       modules, fans, or power supplies.  Note that all known
+       containers should be modeled by the agent, including empty
+       containers.";
+  }
+
+  identity power-supply {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is a
+       power-supplying component.";
+  }
+
+  identity fan {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is a fan or
+       other heat-reduction component.";
+  }
+
+  identity sensor {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is some sort
+       of sensor, such as a temperature sensor within a router
+       chassis.";
+  }
+
+  identity module {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is some sort
+       of self-contained sub-system.  If a module component is
+       removable, then it should be modeled within a container
+
+       component; otherwise, it should be modeled directly within
+       another physical component (e.g., a chassis or another
+       module).";
+  }
+
+  identity port {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is some sort
+       of networking port capable of receiving and/or transmitting
+       networking traffic.";
+  }
+
+  identity stack {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is some sort
+       of super-container (possibly virtual) intended to group
+       together multiple chassis entities.  A stack may be realized
+       by a virtual cable, a real interconnect cable attached to
+       multiple chassis, or multiple interconnect cables.  A stack
+       should not be modeled within any other physical components,
+       but a stack may be contained within another stack.  Only
+       chassis components should be contained within a stack.";
+  }
+
+  identity cpu {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is some sort
+       of central processing unit.";
+  }
+
+  identity energy-object {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is some sort
+       of energy object, i.e., it is a piece of equipment that is
+       part of or attached to a communications network that is
+       monitored, it is controlled, or it aids in the management of
+       another device for Energy Management.";
+  }
+
+  identity battery {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is some sort
+       of battery.";
+  }
+
+  identity storage-drive {
+    base ianahw:hardware-class;
+    description
+      "This identity is applicable if the hardware class is some sort
+       of component with data storage capability as its main
+       functionality, e.g., hard disk drive (HDD), solid-state device
+       (SSD), solid-state hybrid drive (SSHD), object storage device
+       (OSD), or other.";
+  }
+}
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-hardware@2018-03-13.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-hardware@2018-03-13.yang
new file mode 100644
index 0000000000000000000000000000000000000000..4f984b616a61bb169f5ac132c3002ae1033aae7e
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-hardware@2018-03-13.yang
@@ -0,0 +1,1194 @@
+module ietf-hardware {
+  yang-version 1.1;
+  namespace "urn:ietf:params:xml:ns:yang:ietf-hardware";
+  prefix hw;
+
+  import ietf-inet-types {
+    prefix inet;
+  }
+  import ietf-yang-types {
+    prefix yang;
+  }
+  import iana-hardware {
+    prefix ianahw;
+  }
+
+  organization
+    "IETF NETMOD (Network Modeling) Working Group";
+
+  contact
+    "WG Web:   <https://datatracker.ietf.org/wg/netmod/>
+     WG List:  <mailto:netmod@ietf.org>
+
+     Editor:   Andy Bierman
+               <mailto:andy@yumaworks.com>
+
+     Editor:   Martin Bjorklund
+               <mailto:mbj@tail-f.com>
+
+     Editor:   Jie Dong
+               <mailto:jie.dong@huawei.com>
+
+     Editor:   Dan Romascanu
+               <mailto:dromasca@gmail.com>";
+
+  description
+    "This module contains a collection of YANG definitions for
+     managing hardware.
+
+     This data model is designed for the Network Management Datastore
+     Architecture (NMDA) defined in RFC 8342.
+     Copyright (c) 2018 IETF Trust and the persons identified as
+     authors of the code.  All rights reserved.
+
+     Redistribution and use in source and binary forms, with or
+     without modification, is permitted pursuant to, and subject
+     to the license terms contained in, the Simplified BSD License
+     set forth in Section 4.c of the IETF Trust's Legal Provisions
+     Relating to IETF Documents
+     (https://trustee.ietf.org/license-info).
+
+     This version of this YANG module is part of RFC 8348; see
+     the RFC itself for full legal notices.";
+
+  revision 2018-03-13 {
+    description
+      "Initial revision.";
+    reference
+      "RFC 8348: A YANG Data Model for Hardware Management";
+  }
+
+  /*
+   * Features
+   */
+
+  feature entity-mib {
+    description
+      "This feature indicates that the device implements
+       the ENTITY-MIB.";
+    reference
+      "RFC 6933: Entity MIB (Version 4)";
+  }
+
+  feature hardware-state {
+    description
+      "Indicates that ENTITY-STATE-MIB objects are supported";
+    reference
+      "RFC 4268: Entity State MIB";
+  }
+
+  feature hardware-sensor {
+    description
+      "Indicates that ENTITY-SENSOR-MIB objects are supported";
+    reference
+      "RFC 3433: Entity Sensor Management Information Base";
+  }
+
+  /*
+   * Typedefs
+   */
+
+  typedef admin-state {
+    type enumeration {
+      enum unknown {
+        value 1;
+        description
+          "The resource is unable to report administrative state.";
+      }
+      enum locked {
+        value 2;
+        description
+          "The resource is administratively prohibited from use.";
+      }
+      enum shutting-down {
+        value 3;
+        description
+          "The resource usage is administratively limited to current
+           instances of use.";
+      }
+      enum unlocked {
+        value 4;
+        description
+          "The resource is not administratively prohibited from
+           use.";
+      }
+    }
+    description
+      "Represents the various possible administrative states.";
+    reference
+      "RFC 4268: Entity State MIB - EntityAdminState";
+  }
+
+  typedef oper-state {
+    type enumeration {
+      enum unknown {
+        value 1;
+        description
+          "The resource is unable to report its operational state.";
+      }
+      enum disabled {
+        value 2;
+        description
+          "The resource is totally inoperable.";
+      }
+      enum enabled {
+        value 3;
+
+        description
+          "The resource is partially or fully operable.";
+      }
+      enum testing {
+        value 4;
+        description
+          "The resource is currently being tested and cannot
+           therefore report whether or not it is operational.";
+      }
+    }
+    description
+      "Represents the possible values of operational states.";
+    reference
+      "RFC 4268: Entity State MIB - EntityOperState";
+  }
+
+  typedef usage-state {
+    type enumeration {
+      enum unknown {
+        value 1;
+        description
+          "The resource is unable to report usage state.";
+      }
+      enum idle {
+        value 2;
+        description
+          "The resource is servicing no users.";
+      }
+      enum active {
+        value 3;
+        description
+          "The resource is currently in use, and it has sufficient
+           spare capacity to provide for additional users.";
+      }
+      enum busy {
+        value 4;
+        description
+          "The resource is currently in use, but it currently has no
+           spare capacity to provide for additional users.";
+      }
+    }
+    description
+      "Represents the possible values of usage states.";
+    reference
+      "RFC 4268: Entity State MIB -  EntityUsageState";
+  }
+
+  typedef alarm-state {
+    type bits {
+      bit unknown {
+        position 0;
+        description
+          "The resource is unable to report alarm state.";
+      }
+      bit under-repair {
+        position 1;
+        description
+          "The resource is currently being repaired, which, depending
+           on the implementation, may make the other values in this
+           bit string not meaningful.";
+      }
+      bit critical {
+        position 2;
+        description
+          "One or more critical alarms are active against the
+           resource.";
+      }
+      bit major {
+        position 3;
+        description
+          "One or more major alarms are active against the
+           resource.";
+      }
+      bit minor {
+        position 4;
+        description
+          "One or more minor alarms are active against the
+           resource.";
+      }
+      bit warning {
+        position 5;
+        description
+          "One or more warning alarms are active against the
+           resource.";
+      }
+      bit indeterminate {
+        position 6;
+        description
+          "One or more alarms of whose perceived severity cannot be
+           determined are active against this resource.";
+      }
+    }
+    description
+      "Represents the possible values of alarm states.  An alarm is a
+       persistent indication of an error or warning condition.
+
+       When no bits of this attribute are set, then no active alarms
+       are known against this component and it is not under repair.";
+    reference
+      "RFC 4268: Entity State MIB - EntityAlarmStatus";
+  }
+
+  typedef standby-state {
+    type enumeration {
+      enum unknown {
+        value 1;
+        description
+          "The resource is unable to report standby state.";
+      }
+      enum hot-standby {
+        value 2;
+        description
+          "The resource is not providing service, but it will be
+           immediately able to take over the role of the resource to
+           be backed up, without the need for initialization
+           activity, and will contain the same information as the
+           resource to be backed up.";
+      }
+      enum cold-standby {
+        value 3;
+        description
+          "The resource is to back up another resource, but it will
+           not be immediately able to take over the role of a
+           resource to be backed up and will require some
+           initialization activity.";
+      }
+      enum providing-service {
+        value 4;
+        description
+          "The resource is providing service.";
+      }
+    }
+    description
+      "Represents the possible values of standby states.";
+    reference
+      "RFC 4268: Entity State MIB - EntityStandbyStatus";
+  }
+
+  typedef sensor-value-type {
+    type enumeration {
+      enum other {
+        value 1;
+        description
+          "A measure other than those listed below.";
+      }
+      enum unknown {
+        value 2;
+        description
+          "An unknown measurement or arbitrary, relative numbers";
+      }
+      enum volts-AC {
+        value 3;
+        description
+          "A measure of electric potential (alternating current).";
+      }
+      enum volts-DC {
+        value 4;
+        description
+          "A measure of electric potential (direct current).";
+      }
+      enum amperes {
+        value 5;
+        description
+          "A measure of electric current.";
+      }
+      enum watts {
+        value 6;
+        description
+          "A measure of power.";
+      }
+      enum hertz {
+        value 7;
+        description
+          "A measure of frequency.";
+      }
+      enum celsius {
+        value 8;
+        description
+          "A measure of temperature.";
+      }
+      enum percent-RH {
+        value 9;
+        description
+          "A measure of percent relative humidity.";
+      }
+      enum rpm {
+        value 10;
+        description
+          "A measure of shaft revolutions per minute.";
+      }
+      enum cmm {
+        value 11;
+        description
+          "A measure of cubic meters per minute (airflow).";
+      }
+      enum truth-value {
+        value 12;
+        description
+          "Value is one of 1 (true) or 2 (false)";
+      }
+    }
+    description
+      "A node using this data type represents the sensor measurement
+       data type associated with a physical sensor value.  The actual
+       data units are determined by examining a node of this type
+       together with the associated sensor-value-scale node.
+
+       A node of this type SHOULD be defined together with nodes of
+       type sensor-value-scale and type sensor-value-precision.
+       These three types are used to identify the semantics of a node
+       of type sensor-value.";
+    reference
+      "RFC 3433: Entity Sensor Management Information Base -
+                 EntitySensorDataType";
+  }
+
+  typedef sensor-value-scale {
+    type enumeration {
+      enum yocto {
+        value 1;
+        description
+          "Data scaling factor of 10^-24.";
+      }
+      enum zepto {
+        value 2;
+        description
+          "Data scaling factor of 10^-21.";
+      }
+      enum atto {
+        value 3;
+        description
+          "Data scaling factor of 10^-18.";
+      }
+      enum femto {
+        value 4;
+        description
+          "Data scaling factor of 10^-15.";
+      }
+      enum pico {
+        value 5;
+        description
+          "Data scaling factor of 10^-12.";
+      }
+      enum nano {
+        value 6;
+        description
+          "Data scaling factor of 10^-9.";
+      }
+      enum micro {
+        value 7;
+        description
+          "Data scaling factor of 10^-6.";
+      }
+      enum milli {
+        value 8;
+        description
+          "Data scaling factor of 10^-3.";
+      }
+      enum units {
+        value 9;
+        description
+          "Data scaling factor of 10^0.";
+      }
+      enum kilo {
+        value 10;
+        description
+          "Data scaling factor of 10^3.";
+      }
+      enum mega {
+        value 11;
+        description
+          "Data scaling factor of 10^6.";
+      }
+      enum giga {
+        value 12;
+        description
+          "Data scaling factor of 10^9.";
+      }
+      enum tera {
+        value 13;
+        description
+          "Data scaling factor of 10^12.";
+      }
+      enum peta {
+        value 14;
+        description
+          "Data scaling factor of 10^15.";
+      }
+      enum exa {
+        value 15;
+        description
+          "Data scaling factor of 10^18.";
+      }
+      enum zetta {
+        value 16;
+        description
+          "Data scaling factor of 10^21.";
+      }
+      enum yotta {
+        value 17;
+        description
+          "Data scaling factor of 10^24.";
+      }
+    }
+    description
+      "A node using this data type represents a data scaling factor,
+       represented with an International System of Units (SI) prefix.
+       The actual data units are determined by examining a node of
+       this type together with the associated sensor-value-type.
+
+       A node of this type SHOULD be defined together with nodes of
+       type sensor-value-type and type sensor-value-precision.
+       Together, associated nodes of these three types are used to
+       identify the semantics of a node of type sensor-value.";
+    reference
+      "RFC 3433: Entity Sensor Management Information Base -
+                 EntitySensorDataScale";
+  }
+
+  typedef sensor-value-precision {
+    type int8 {
+      range "-8 .. 9";
+    }
+    description
+      "A node using this data type represents a sensor value
+       precision range.
+
+       A node of this type SHOULD be defined together with nodes of
+       type sensor-value-type and type sensor-value-scale.  Together,
+       associated nodes of these three types are used to identify the
+       semantics of a node of type sensor-value.
+
+       If a node of this type contains a value in the range 1 to 9,
+       it represents the number of decimal places in the fractional
+       part of an associated sensor-value fixed-point number.
+
+       If a node of this type contains a value in the range -8 to -1,
+       it represents the number of accurate digits in the associated
+       sensor-value fixed-point number.
+
+       The value zero indicates the associated sensor-value node is
+       not a fixed-point number.
+
+       Server implementers must choose a value for the associated
+       sensor-value-precision node so that the precision and accuracy
+       of the associated sensor-value node is correctly indicated.
+
+       For example, a component representing a temperature sensor
+       that can measure 0 to 100 degrees C in 0.1 degree
+       increments, +/- 0.05 degrees, would have a
+       sensor-value-precision value of '1', a sensor-value-scale
+       value of 'units', and a sensor-value ranging from '0' to
+       '1000'.  The sensor-value would be interpreted as
+       'degrees C * 10'.";
+    reference
+      "RFC 3433: Entity Sensor Management Information Base -
+                 EntitySensorPrecision";
+  }
+
+  typedef sensor-value {
+    type int32 {
+      range "-1000000000 .. 1000000000";
+    }
+    description
+     "A node using this data type represents a sensor value.
+
+      A node of this type SHOULD be defined together with nodes of
+      type sensor-value-type, type sensor-value-scale, and
+      type sensor-value-precision.  Together, associated nodes of
+      those three types are used to identify the semantics of a node
+      of this data type.
+
+      The semantics of a node using this data type are determined by
+      the value of the associated sensor-value-type node.
+
+      If the associated sensor-value-type node is equal to 'voltsAC',
+      'voltsDC', 'amperes', 'watts', 'hertz', 'celsius', or 'cmm',
+      then a node of this type MUST contain a fixed-point number
+      ranging from -999,999,999 to +999,999,999.  The value
+      -1000000000 indicates an underflow error.  The value
+      +1000000000 indicates an overflow error.  The
+      sensor-value-precision indicates how many fractional digits
+      are represented in the associated sensor-value node.
+
+      If the associated sensor-value-type node is equal to
+      'percentRH', then a node of this type MUST contain a number
+      ranging from 0 to 100.
+
+      If the associated sensor-value-type node is equal to 'rpm',
+      then a node of this type MUST contain a number ranging from
+      -999,999,999 to +999,999,999.
+
+      If the associated sensor-value-type node is equal to
+      'truth-value', then a node of this type MUST contain either the
+      value 1 (true) or the value 2 (false).
+
+      If the associated sensor-value-type node is equal to 'other' or
+      'unknown', then a node of this type MUST contain a number
+      ranging from -1000000000 to 1000000000.";
+    reference
+      "RFC 3433: Entity Sensor Management Information Base -
+                 EntitySensorValue";
+  }
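+
+  /*
+   * Editor's note (illustrative, not part of RFC 8348): a minimal
+   * Python sketch of how a client might combine the associated
+   * sensor nodes above into a human-readable reading.  The SCALE
+   * table and the function name are assumptions made for this
+   * sketch only.
+   *
+   *   SCALE = {'yocto': -24, 'zepto': -21, 'atto': -18,
+   *            'femto': -15, 'pico': -12, 'nano': -9, 'micro': -6,
+   *            'milli': -3, 'units': 0, 'kilo': 3, 'mega': 6,
+   *            'giga': 9, 'tera': 12, 'peta': 15, 'exa': 18,
+   *            'zetta': 21, 'yotta': 24}
+   *
+   *   def decode(value, scale, precision):
+   *       # -1000000000 / +1000000000 are under/overflow sentinels
+   *       if value in (-1000000000, 1000000000):
+   *           raise ValueError('sensor under/overflow')
+   *       if precision > 0:     # fixed-point fractional digits
+   *           value = value / (10 ** precision)
+   *       return value * (10 ** SCALE[scale])
+   *
+   *   # temperature example from sensor-value-precision above:
+   *   decode(235, 'units', 1)   # -> 23.5 (degrees celsius)
+   */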
+
+  typedef sensor-status {
+    type enumeration {
+      enum ok {
+        value 1;
+        description
+          "Indicates that the server can obtain the sensor value.";
+      }
+      enum unavailable {
+        value 2;
+        description
+          "Indicates that the server presently cannot obtain the
+           sensor value.";
+      }
+      enum nonoperational {
+        value 3;
+        description
+          "Indicates that the server believes the sensor is broken.
+           The sensor could have a hard failure (disconnected wire)
+           or a soft failure such as out-of-range, jittery, or wildly
+           fluctuating readings.";
+      }
+    }
+    description
+      "A node using this data type represents the operational status
+       of a physical sensor.";
+    reference
+      "RFC 3433: Entity Sensor Management Information Base -
+                 EntitySensorStatus";
+  }
+
+  /*
+   * Data nodes
+   */
+
+  container hardware {
+    description
+      "Data nodes representing components.
+
+       If the server supports configuration of hardware components,
+       then this data model is instantiated in the configuration
+       datastores supported by the server.  The leaf-list 'datastore'
+       for the module 'ietf-hardware' in the YANG library provides
+       this information.";
+
+    leaf last-change {
+      type yang:date-and-time;
+      config false;
+      description
+        "The time the '/hardware/component' list changed in the
+         operational state.";
+    }
+
+    list component {
+      key name;
+      description
+        "List of components.
+
+         When the server detects a new hardware component, it
+         initializes a list entry in the operational state.
+
+         If the server does not support configuration of hardware
+         components, list entries in the operational state are
+         initialized with values for all nodes as detected by the
+         implementation.
+
+         Otherwise, this procedure is followed:
+
+           1. If there is an entry in the '/hardware/component' list
+              in the intended configuration with values for the nodes
+              'class', 'parent', and 'parent-rel-pos' that are equal
+              to the detected values, then the list entry in the
+              operational state is initialized with the configured
+              values, including the 'name'.
+
+           2. Otherwise (i.e., there is no matching configuration
+              entry), the list entry in the operational state is
+              initialized with values for all nodes as detected by
+              the implementation.
+
+         If the '/hardware/component' list in the intended
+         configuration is modified, then the system MUST behave as if
+         it re-initializes itself and follow the procedure in (1).";
+      reference
+        "RFC 6933: Entity MIB (Version 4) - entPhysicalEntry";
+
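+      /*
+       * Editor's note (illustrative, not part of RFC 8348): the
+       * initialization procedure described above, sketched in
+       * Python.  The dict-based representation and the helper name
+       * are assumptions made for this sketch only.
+       *
+       *   MATCH_KEYS = ('class', 'parent', 'parent-rel-pos')
+       *
+       *   def init_operational_entry(detected, intended):
+       *       for entry in intended:                      # step 1
+       *           if all(entry.get(k) == detected.get(k)
+       *                  for k in MATCH_KEYS):
+       *               return dict(entry)  # adopt config, incl. 'name'
+       *       return dict(detected)                       # step 2
+       */
+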
+      leaf name {
+        type string;
+        description
+          "The name assigned to this component.
+
+           This name is not required to be the same as
+           entPhysicalName.";
+      }
+
+      leaf class {
+        type identityref {
+          base ianahw:hardware-class;
+        }
+        mandatory true;
+        description
+          "An indication of the general hardware type of the
+           component.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalClass";
+      }
+
+      leaf physical-index {
+        if-feature entity-mib;
+        type int32 {
+          range "1..2147483647";
+        }
+        config false;
+        description
+          "The entPhysicalIndex for the entPhysicalEntry represented
+           by this list entry.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalIndex";
+      }
+
+      leaf description {
+        type string;
+        config false;
+        description
+          "A textual description of the component.  This node should
+           contain a string that identifies the manufacturer's name
+           for the component and should be set to a distinct value
+           for each version or model of the component.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalDescr";
+      }
+
+      leaf parent {
+        type leafref {
+          path "../../component/name";
+          require-instance false;
+        }
+        description
+          "The name of the component that physically contains this
+           component.
+
+           If this leaf is not instantiated, it indicates that this
+           component is not contained in any other component.
+
+           In the event that a physical component is contained by
+           more than one physical component (e.g., double-wide
+           modules), this node contains the name of one of these
+           components.  An implementation MUST use the same name
+           every time this node is instantiated.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalContainedIn";
+      }
+
+      leaf parent-rel-pos {
+        type int32 {
+          range "0 .. 2147483647";
+        }
+        description
+          "An indication of the relative position of this child
+           component among all its sibling components.  Sibling
+           components are defined as components that:
+
+             o share the same value of the 'parent' node and
+
+             o share a common base identity for the 'class' node.
+
+           Note that the last rule gives implementations flexibility
+           in how components are numbered.  For example, some
+           implementations might have a single number series for all
+           components derived from 'ianahw:port', while some others
+           might have different number series for different
+           components with identities derived from 'ianahw:port' (for
+           example, one for registered jack 45 (RJ45) and one for
+           small form-factor pluggable (SFP)).";
+        reference
+          "RFC 6933: Entity MIB (Version 4) -
+                     entPhysicalParentRelPos";
+      }
+
+      leaf-list contains-child {
+        type leafref {
+          path "../../component/name";
+        }
+        config false;
+        description
+          "The name of the contained component.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalChildIndex";
+      }
+
+      leaf hardware-rev {
+        type string;
+        config false;
+        description
+          "The vendor-specific hardware revision string for the
+           component.  The preferred value is the hardware revision
+           identifier actually printed on the component itself (if
+           present).";
+        reference
+          "RFC 6933: Entity MIB (Version 4) -
+                     entPhysicalHardwareRev";
+      }
+
+      leaf firmware-rev {
+        type string;
+        config false;
+        description
+          "The vendor-specific firmware revision string for the
+           component.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) -
+                     entPhysicalFirmwareRev";
+      }
+
+      leaf software-rev {
+        type string;
+        config false;
+        description
+          "The vendor-specific software revision string for the
+           component.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) -
+                     entPhysicalSoftwareRev";
+      }
+
+      leaf serial-num {
+        type string;
+        config false;
+        description
+          "The vendor-specific serial number string for the
+           component.  The preferred value is the serial number
+           string actually printed on the component itself (if
+           present).";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalSerialNum";
+      }
+
+      leaf mfg-name {
+        type string;
+        config false;
+        description
+          "The name of the manufacturer of this physical component.
+           The preferred value is the manufacturer name string
+           actually printed on the component itself (if present).
+
+           Note that comparisons between instances of the
+           'model-name', 'firmware-rev', 'software-rev', and
+           'serial-num' nodes are only meaningful amongst components
+           with the same value of 'mfg-name'.
+
+           If the manufacturer name string associated with the
+           physical component is unknown to the server, then this
+           node is not instantiated.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalMfgName";
+      }
+
+      leaf model-name {
+        type string;
+        config false;
+        description
+          "The vendor-specific model name identifier string
+           associated with this physical component.  The preferred
+           value is the customer-visible part number, which may be
+           printed on the component itself.
+           If the model name string associated with the physical
+           component is unknown to the server, then this node is not
+           instantiated.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalModelName";
+      }
+
+      leaf alias {
+        type string;
+        description
+          "An 'alias' name for the component, as specified by a
+           network manager, that provides a non-volatile 'handle' for
+           the component.
+
+           If no configured value exists, the server MAY set the
+           value of this node to a locally unique value in the
+           operational state.
+
+           A server implementation MAY map this leaf to the
+           entPhysicalAlias MIB object.  Such an implementation needs
+           to use some mechanism to handle the differences in size
+           and characters allowed between this leaf and
+           entPhysicalAlias.  The definition of such a mechanism is
+           outside the scope of this document.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalAlias";
+      }
+
+      leaf asset-id {
+        type string;
+        description
+          "This node is a user-assigned asset tracking identifier for
+           the component.
+
+           A server implementation MAY map this leaf to the
+           entPhysicalAssetID MIB object.  Such an implementation
+           needs to use some mechanism to handle the differences in
+           size and characters allowed between this leaf and
+           entPhysicalAssetID.  The definition of such a mechanism is
+           outside the scope of this document.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalAssetID";
+      }
+
+      leaf is-fru {
+        type boolean;
+        config false;
+        description
+          "This node indicates whether or not this component is
+           considered a 'field-replaceable unit' by the vendor.  If
+           this node contains the value 'true', then this component
+           identifies a field-replaceable unit.  For all components
+           that are permanently contained within a field-replaceable
+           unit, the value 'false' should be returned for this
+           node.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalIsFRU";
+      }
+
+      leaf mfg-date {
+        type yang:date-and-time;
+        config false;
+        description
+          "The date of manufacturing of the managed component.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalMfgDate";
+      }
+
+      leaf-list uri {
+        type inet:uri;
+        description
+          "This node contains identification information about the
+           component.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalUris";
+      }
+
+      leaf uuid {
+        type yang:uuid;
+        config false;
+        description
+          "A Universally Unique Identifier of the component.";
+        reference
+          "RFC 6933: Entity MIB (Version 4) - entPhysicalUUID";
+      }
+
+      container state {
+        if-feature hardware-state;
+        description
+          "State-related nodes";
+        reference
+          "RFC 4268: Entity State MIB";
+
+        leaf state-last-changed {
+          type yang:date-and-time;
+          config false;
+          description
+            "The date and time when the value of any of the
+             admin-state, oper-state, usage-state, alarm-state, or
+             standby-state changed for this component.
+
+             If there has been no change since the last
+             re-initialization of the local system, this node
+             contains the date and time of local system
+             initialization.  If there has been no change since the
+             component was added to the local system, this node
+             contains the date and time of the insertion.";
+          reference
+            "RFC 4268: Entity State MIB - entStateLastChanged";
+        }
+
+        leaf admin-state {
+          type admin-state;
+          description
+            "The administrative state for this component.
+
+             This node refers to a component's administrative
+             permission to service both other components within its
+             containment hierarchy as well other users of its
+             services defined by means outside the scope of this
+             module.
+
+             Some components exhibit only a subset of the remaining
+             administrative state values.  Some components cannot be
+             locked; hence, this node exhibits only the 'unlocked'
+             state.  Other components cannot be shut down gracefully;
+             hence, this node does not exhibit the 'shutting-down'
+             state.";
+          reference
+            "RFC 4268: Entity State MIB - entStateAdmin";
+        }
+
+        leaf oper-state {
+          type oper-state;
+          config false;
+          description
+            "The operational state for this component.
+
+             Note that this node does not follow the administrative
+             state.  An administrative state of 'down' does not
+             predict an operational state of 'disabled'.
+
+             Note that some implementations may not be able to
+             accurately report oper-state while the admin-state node
+             has a value other than 'unlocked'.  In these cases, this
+             node MUST have a value of 'unknown'.";
+          reference
+            "RFC 4268: Entity State MIB - entStateOper";
+        }
+
+        leaf usage-state {
+          type usage-state;
+          config false;
+          description
+            "The usage state for this component.
+
+             This node refers to a component's ability to service
+             more components in a containment hierarchy.
+
+             Some components will exhibit only a subset of the usage
+             state values.  Components that are unable to ever
+             service any components within a containment hierarchy
+             will always have a usage state of 'busy'.  In some
+             cases, a component will be able to support only one
+             other component within its containment hierarchy and
+             will therefore only exhibit values of 'idle' and
+             'busy'.";
+          reference
+            "RFC 4268: Entity State MIB - entStateUsage";
+        }
+
+        leaf alarm-state {
+          type alarm-state;
+          config false;
+          description
+            "The alarm state for this component.  It does not
+             include the alarms raised on child components within its
+             containment hierarchy.";
+          reference
+            "RFC 4268: Entity State MIB - entStateAlarm";
+        }
+
+        leaf standby-state {
+          type standby-state;
+          config false;
+          description
+            "The standby state for this component.
+
+             Some components will exhibit only a subset of the
+             remaining standby state values.  If this component
+             cannot operate in a standby role, the value of this node
+             will always be 'providing-service'.";
+          reference
+            "RFC 4268: Entity State MIB - entStateStandby";
+        }
+      }
+
+      container sensor-data {
+        when 'derived-from-or-self(../class,
+                                   "ianahw:sensor")' {
+          description
+            "Sensor data nodes present for any component of type
+             'sensor'";
+        }
+        if-feature hardware-sensor;
+        config false;
+        description
+          "Sensor-related nodes.";
+        reference
+          "RFC 3433: Entity Sensor Management Information Base";
+
+        leaf value {
+          type sensor-value;
+          description
+            "The most recent measurement obtained by the server
+             for this sensor.
+
+             A client that periodically fetches this node should also
+             fetch the nodes 'value-type', 'value-scale', and
+             'value-precision', since they may change when the value
+             is changed.";
+          reference
+            "RFC 3433: Entity Sensor Management Information Base -
+                       entPhySensorValue";
+        }
+
+        leaf value-type {
+          type sensor-value-type;
+          description
+            "The type of data units associated with the
+             sensor value";
+          reference
+            "RFC 3433: Entity Sensor Management Information Base -
+                       entPhySensorType";
+        }
+        leaf value-scale {
+          type sensor-value-scale;
+          description
+            "The (power of 10) scaling factor associated
+             with the sensor value";
+          reference
+            "RFC 3433: Entity Sensor Management Information Base -
+                       entPhySensorScale";
+        }
+
+        leaf value-precision {
+          type sensor-value-precision;
+          description
+            "The number of decimal places of precision
+             associated with the sensor value";
+          reference
+            "RFC 3433: Entity Sensor Management Information Base -
+                       entPhySensorPrecision";
+        }
+
+        leaf oper-status {
+          type sensor-status;
+          description
+            "The operational status of the sensor.";
+          reference
+            "RFC 3433: Entity Sensor Management Information Base -
+                       entPhySensorOperStatus";
+        }
+
+        leaf units-display {
+          type string;
+          description
+            "A textual description of the data units that should be
+             used in the display of the sensor value.";
+          reference
+            "RFC 3433: Entity Sensor Management Information Base -
+                       entPhySensorUnitsDisplay";
+        }
+
+        leaf value-timestamp {
+          type yang:date-and-time;
+          description
+            "The time the status and/or value of this sensor was last
+             obtained by the server.";
+          reference
+            "RFC 3433: Entity Sensor Management Information Base -
+                       entPhySensorValueTimeStamp";
+        }
+        leaf value-update-rate {
+          type uint32;
+          units "milliseconds";
+          description
+            "An indication of the frequency that the server updates
+             the associated 'value' node, represented in
+             milliseconds.  The value zero indicates:
+
+              - the sensor value is updated on demand (e.g.,
+                when polled by the server for a get-request),
+
+              - the sensor value is updated when the sensor
+                value changes (event-driven), or
+
+              - the server does not know the update rate.";
+          reference
+            "RFC 3433: Entity Sensor Management Information Base -
+                       entPhySensorValueUpdateRate";
+        }
+      }
+    }
+  }
+
+  /*
+   * Notifications
+   */
+
+  notification hardware-state-change {
+    description
+      "A hardware-state-change notification is generated when the
+       value of /hardware/last-change changes in the operational
+       state.";
+    reference
+      "RFC 6933: Entity MIB (Version 4) - entConfigChange";
+  }
+
+  notification hardware-state-oper-enabled {
+    if-feature hardware-state;
+    description
+      "A hardware-state-oper-enabled notification signifies that a
+       component has transitioned into the 'enabled' state.";
+
+    leaf name {
+      type leafref {
+        path "/hardware/component/name";
+      }
+      description
+        "The name of the component that has transitioned into the
+         'enabled' state.";
+    }
+    leaf admin-state {
+      type leafref {
+        path "/hardware/component/state/admin-state";
+      }
+      description
+        "The administrative state for the component.";
+    }
+    leaf alarm-state {
+      type leafref {
+        path "/hardware/component/state/alarm-state";
+      }
+      description
+        "The alarm state for the component.";
+    }
+    reference
+      "RFC 4268: Entity State MIB - entStateOperEnabled";
+  }
+
+  notification hardware-state-oper-disabled {
+    if-feature hardware-state;
+    description
+      "A hardware-state-oper-disabled notification signifies that a
+       component has transitioned into the 'disabled' state.";
+
+    leaf name {
+      type leafref {
+        path "/hardware/component/name";
+      }
+      description
+        "The name of the component that has transitioned into the
+         'disabled' state.";
+    }
+    leaf admin-state {
+      type leafref {
+        path "/hardware/component/state/admin-state";
+      }
+      description
+        "The administrative state for the component.";
+    }
+    leaf alarm-state {
+      type leafref {
+        path "/hardware/component/state/alarm-state";
+      }
+      description
+        "The alarm state for the component.";
+    }
+    reference
+      "RFC 4268: Entity State MIB - entStateOperDisabled";
+  }
+
+}
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-inet-types@2013-07-15.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-inet-types@2013-07-15.yang
new file mode 100644
index 0000000000000000000000000000000000000000..eacefb6363de1beb543567a0fa705571b7dc57a2
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-inet-types@2013-07-15.yang
@@ -0,0 +1,458 @@
+module ietf-inet-types {
+
+  namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types";
+  prefix "inet";
+
+  organization
+   "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+  contact
+   "WG Web:   <http://tools.ietf.org/wg/netmod/>
+    WG List:  <mailto:netmod@ietf.org>
+
+    WG Chair: David Kessens
+              <mailto:david.kessens@nsn.com>
+
+    WG Chair: Juergen Schoenwaelder
+              <mailto:j.schoenwaelder@jacobs-university.de>
+
+    Editor:   Juergen Schoenwaelder
+              <mailto:j.schoenwaelder@jacobs-university.de>";
+
+  description
+   "This module contains a collection of generally useful derived
+    YANG data types for Internet addresses and related things.
+
+    Copyright (c) 2013 IETF Trust and the persons identified as
+    authors of the code.  All rights reserved.
+
+    Redistribution and use in source and binary forms, with or
+    without modification, is permitted pursuant to, and subject
+    to the license terms contained in, the Simplified BSD License
+    set forth in Section 4.c of the IETF Trust's Legal Provisions
+    Relating to IETF Documents
+    (http://trustee.ietf.org/license-info).
+
+    This version of this YANG module is part of RFC 6991; see
+    the RFC itself for full legal notices.";
+
+  revision 2013-07-15 {
+    description
+     "This revision adds the following new data types:
+      - ip-address-no-zone
+      - ipv4-address-no-zone
+      - ipv6-address-no-zone";
+    reference
+     "RFC 6991: Common YANG Data Types";
+  }
+
+  revision 2010-09-24 {
+    description
+     "Initial revision.";
+    reference
+     "RFC 6021: Common YANG Data Types";
+  }
+
+  /*** collection of types related to protocol fields ***/
+
+  typedef ip-version {
+    type enumeration {
+      enum unknown {
+        value "0";
+        description
+         "An unknown or unspecified version of the Internet
+          protocol.";
+      }
+      enum ipv4 {
+        value "1";
+        description
+         "The IPv4 protocol as defined in RFC 791.";
+      }
+      enum ipv6 {
+        value "2";
+        description
+         "The IPv6 protocol as defined in RFC 2460.";
+      }
+    }
+    description
+     "This value represents the version of the IP protocol.
+
+      In the value set and its semantics, this type is equivalent
+      to the InetVersion textual convention of the SMIv2.";
+    reference
+     "RFC  791: Internet Protocol
+      RFC 2460: Internet Protocol, Version 6 (IPv6) Specification
+      RFC 4001: Textual Conventions for Internet Network Addresses";
+  }
+
+  typedef dscp {
+    type uint8 {
+      range "0..63";
+    }
+    description
+     "The dscp type represents a Differentiated Services Code Point
+      that may be used for marking packets in a traffic stream.
+      In the value set and its semantics, this type is equivalent
+      to the Dscp textual convention of the SMIv2.";
+    reference
+     "RFC 3289: Management Information Base for the Differentiated
+                Services Architecture
+      RFC 2474: Definition of the Differentiated Services Field
+                (DS Field) in the IPv4 and IPv6 Headers
+      RFC 2780: IANA Allocation Guidelines For Values In
+                the Internet Protocol and Related Headers";
+  }
+
+  typedef ipv6-flow-label {
+    type uint32 {
+      range "0..1048575";
+    }
+    description
+     "The ipv6-flow-label type represents the flow identifier or Flow
+      Label in an IPv6 packet header that may be used to
+      discriminate traffic flows.
+
+      In the value set and its semantics, this type is equivalent
+      to the IPv6FlowLabel textual convention of the SMIv2.";
+    reference
+     "RFC 3595: Textual Conventions for IPv6 Flow Label
+      RFC 2460: Internet Protocol, Version 6 (IPv6) Specification";
+  }
+
+  typedef port-number {
+    type uint16 {
+      range "0..65535";
+    }
+    description
+     "The port-number type represents a 16-bit port number of an
+      Internet transport-layer protocol such as UDP, TCP, DCCP, or
+      SCTP.  Port numbers are assigned by IANA.  A current list of
+      all assignments is available from <http://www.iana.org/>.
+
+      Note that the port number value zero is reserved by IANA.  In
+      situations where the value zero does not make sense, it can
+      be excluded by subtyping the port-number type.
+      In the value set and its semantics, this type is equivalent
+      to the InetPortNumber textual convention of the SMIv2.";
+    reference
+     "RFC  768: User Datagram Protocol
+      RFC  793: Transmission Control Protocol
+      RFC 4960: Stream Control Transmission Protocol
+      RFC 4340: Datagram Congestion Control Protocol (DCCP)
+      RFC 4001: Textual Conventions for Internet Network Addresses";
+  }
+
+  /*** collection of types related to autonomous systems ***/
+
+  typedef as-number {
+    type uint32;
+    description
+     "The as-number type represents autonomous system numbers
+      which identify an Autonomous System (AS).  An AS is a set
+      of routers under a single technical administration, using
+      an interior gateway protocol and common metrics to route
+      packets within the AS, and using an exterior gateway
+      protocol to route packets to other ASes.  IANA maintains
+      the AS number space and has delegated large parts to the
+      regional registries.
+
+      Autonomous system numbers were originally limited to 16
+      bits.  BGP extensions have enlarged the autonomous system
+      number space to 32 bits.  This type therefore uses an uint32
+      base type without a range restriction in order to support
+      a larger autonomous system number space.
+
+      In the value set and its semantics, this type is equivalent
+      to the InetAutonomousSystemNumber textual convention of
+      the SMIv2.";
+    reference
+     "RFC 1930: Guidelines for creation, selection, and registration
+                of an Autonomous System (AS)
+      RFC 4271: A Border Gateway Protocol 4 (BGP-4)
+      RFC 4001: Textual Conventions for Internet Network Addresses
+      RFC 6793: BGP Support for Four-Octet Autonomous System (AS)
+                Number Space";
+  }
+
+  /*** collection of types related to IP addresses and hostnames ***/
+
+  typedef ip-address {
+    type union {
+      type inet:ipv4-address;
+      type inet:ipv6-address;
+    }
+    description
+     "The ip-address type represents an IP address and is IP
+      version neutral.  The format of the textual representation
+      implies the IP version.  This type supports scoped addresses
+      by allowing zone identifiers in the address format.";
+    reference
+     "RFC 4007: IPv6 Scoped Address Architecture";
+  }
+
+  typedef ipv4-address {
+    type string {
+      pattern
+        '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+      +  '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+      + '(%[\p{N}\p{L}]+)?';
+    }
+    description
+      "The ipv4-address type represents an IPv4 address in
+       dotted-quad notation.  The IPv4 address may include a zone
+       index, separated by a % sign.
+
+       The zone index is used to disambiguate identical address
+       values.  For link-local addresses, the zone index will
+       typically be the interface index number or the name of an
+       interface.  If the zone index is not present, the default
+       zone of the device will be used.
+
+       The canonical format for the zone index is the numerical
+       format";
+  }
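+
+  /*
+   * Editor's note (illustrative, not part of RFC 6991): splitting
+   * off the optional zone index in Python; the helper name is an
+   * assumption for this sketch.
+   *
+   *   def split_zone(addr):
+   *       # '192.0.2.1%3' -> ('192.0.2.1', '3'); no zone -> None
+   *       host, _, zone = addr.partition('%')
+   *       return host, (zone or None)
+   */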
+
+  typedef ipv6-address {
+    type string {
+      pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+            + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+            + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+            + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+            + '(%[\p{N}\p{L}]+)?';
+      pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+            + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+            + '(%.+)?';
+    }
+    description
+     "The ipv6-address type represents an IPv6 address in full,
+      mixed, shortened, and shortened-mixed notation.  The IPv6
+      address may include a zone index, separated by a % sign.
+
+      The zone index is used to disambiguate identical address
+      values.  For link-local addresses, the zone index will
+      typically be the interface index number or the name of an
+      interface.  If the zone index is not present, the default
+      zone of the device will be used.
+
+      The canonical format of IPv6 addresses uses the textual
+      representation defined in Section 4 of RFC 5952.  The
+      canonical format for the zone index is the numerical
+      format as described in Section 11.2 of RFC 4007.";
+    reference
+     "RFC 4291: IP Version 6 Addressing Architecture
+      RFC 4007: IPv6 Scoped Address Architecture
+      RFC 5952: A Recommendation for IPv6 Address Text
+                Representation";
+  }
+
+  typedef ip-address-no-zone {
+    type union {
+      type inet:ipv4-address-no-zone;
+      type inet:ipv6-address-no-zone;
+    }
+    description
+     "The ip-address-no-zone type represents an IP address and is
+      IP version neutral.  The format of the textual representation
+      implies the IP version.  This type does not support scoped
+      addresses since it does not allow zone identifiers in the
+      address format.";
+    reference
+     "RFC 4007: IPv6 Scoped Address Architecture";
+  }
+
+  typedef ipv4-address-no-zone {
+    type inet:ipv4-address {
+      pattern '[0-9\.]*';
+    }
+    description
+      "An IPv4 address without a zone index.  This type, derived from
+       ipv4-address, may be used in situations where the zone is
+       known from the context and hence no zone index is needed.";
+  }
+
+  typedef ipv6-address-no-zone {
+    type inet:ipv6-address {
+      pattern '[0-9a-fA-F:\.]*';
+    }
+    description
+      "An IPv6 address without a zone index.  This type, derived from
+       ipv6-address, may be used in situations where the zone is
+       known from the context and hence no zone index is needed.";
+    reference
+     "RFC 4291: IP Version 6 Addressing Architecture
+      RFC 4007: IPv6 Scoped Address Architecture
+      RFC 5952: A Recommendation for IPv6 Address Text
+                Representation";
+  }
+
+  typedef ip-prefix {
+    type union {
+      type inet:ipv4-prefix;
+      type inet:ipv6-prefix;
+    }
+    description
+     "The ip-prefix type represents an IP prefix and is IP
+      version neutral.  The format of the textual representations
+      implies the IP version.";
+  }
+
+  typedef ipv4-prefix {
+    type string {
+      pattern
+         '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+       +  '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+       + '/(([0-9])|([1-2][0-9])|(3[0-2]))';
+    }
+    description
+     "The ipv4-prefix type represents an IPv4 address prefix.
+      The prefix length is given by the number following the
+      slash character and must be less than or equal to 32.
+
+      A prefix length value of n corresponds to an IP address
+      mask that has n contiguous 1-bits from the most
+      significant bit (MSB) and all other bits set to 0.
+
+      The canonical format of an IPv4 prefix has all bits of
+      the IPv4 address set to zero that are not part of the
+      IPv4 prefix.";
+  }
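+
+  /*
+   * Editor's note (illustrative, not part of RFC 6991): deriving
+   * the canonical form (all host bits zeroed) with the Python
+   * standard library:
+   *
+   *   import ipaddress
+   *
+   *   def canonical_ipv4_prefix(prefix):
+   *       # '192.0.2.97/24' -> '192.0.2.0/24'
+   *       return str(ipaddress.ip_network(prefix, strict=False))
+   */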
+
+  typedef ipv6-prefix {
+    type string {
+      pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+            + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+            + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+            + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+            + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))';
+      pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+            + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+            + '(/.+)';
+    }
+    description
+     "The ipv6-prefix type represents an IPv6 address prefix.
+      The prefix length is given by the number following the
+      slash character and must be less than or equal to 128.
+
+      A prefix length value of n corresponds to an IP address
+      mask that has n contiguous 1-bits from the most
+      significant bit (MSB) and all other bits set to 0.
+
+      The IPv6 address should have all bits that do not belong
+      to the prefix set to zero.
+
+      The canonical format of an IPv6 prefix has all bits of
+      the IPv6 address set to zero that are not part of the
+      IPv6 prefix.  Furthermore, the IPv6 address is represented
+      as defined in Section 4 of RFC 5952.";
+    reference
+     "RFC 5952: A Recommendation for IPv6 Address Text
+                Representation";
+  }
+
+  /*** collection of domain name and URI types ***/
+
+  typedef domain-name {
+    type string {
+      pattern
+        '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*'
+      + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)'
+      + '|\.';
+      length "1..253";
+    }
+    description
+     "The domain-name type represents a DNS domain name.  The
+      name SHOULD be fully qualified whenever possible.
+
+      Internet domain names are only loosely specified.  Section
+      3.5 of RFC 1034 recommends a syntax (modified in Section
+      2.1 of RFC 1123).  The pattern above is intended to allow
+      for current practice in domain name use, and some possible
+      future expansion.  It is designed to hold various types of
+      domain names, including names used for A or AAAA records
+      (host names) and other records, such as SRV records.  Note
+      that Internet host names have a stricter syntax (described
+      in RFC 952) than the DNS recommendations in RFCs 1034 and
+      1123, and that systems that want to store host names in
+      schema nodes using the domain-name type are recommended to
+      adhere to this stricter standard to ensure interoperability.
+
+      The encoding of DNS names in the DNS protocol is limited
+      to 255 characters.  Since the encoding consists of labels
+      prefixed by a length byte and there is a trailing NULL
+      byte, only 253 characters can appear in the textual dotted
+      notation.
+
+      The description clause of schema nodes using the domain-name
+      type MUST describe when and how these names are resolved to
+      IP addresses.  Note that the resolution of a domain-name value
+      may require querying multiple DNS records (e.g., A for IPv4
+      and AAAA for IPv6).  The order of the resolution process and
+      which DNS record takes precedence can either be defined
+      explicitly or may depend on the configuration of the
+      resolver.
+
+      Domain-name values use the US-ASCII encoding.  Their canonical
+      format uses lowercase US-ASCII characters.  Internationalized
+      domain names MUST be A-labels as per RFC 5890.";
+    reference
+     "RFC  952: DoD Internet Host Table Specification
+      RFC 1034: Domain Names - Concepts and Facilities
+      RFC 1123: Requirements for Internet Hosts -- Application
+                and Support
+      RFC 2782: A DNS RR for specifying the location of services
+                (DNS SRV)
+      RFC 5890: Internationalized Domain Names in Applications
+                (IDNA): Definitions and Document Framework";
+  }
+
+  typedef host {
+    type union {
+      type inet:ip-address;
+      type inet:domain-name;
+    }
+    description
+     "The host type represents either an IP address or a DNS
+      domain name.";
+  }
+
+  typedef uri {
+    type string;
+    description
+     "The uri type represents a Uniform Resource Identifier
+      (URI) as defined by STD 66.
+
+      Objects using the uri type MUST be in US-ASCII encoding,
+      and MUST be normalized as described by RFC 3986 Sections
+      6.2.1, 6.2.2.1, and 6.2.2.2.  All unnecessary
+      percent-encoding is removed, and all case-insensitive
+      characters are set to lowercase except for hexadecimal
+      digits, which are normalized to uppercase as described in
+      Section 6.2.2.1.
+
+      The purpose of this normalization is to help provide
+      unique URIs.  Note that this normalization is not
+      sufficient to provide uniqueness.  Two URIs that are
+      textually distinct after this normalization may still be
+      equivalent.
+
+      Objects using the uri type may restrict the schemes that
+      they permit.  For example, 'data:' and 'urn:' schemes
+      might not be appropriate.
+
+      A zero-length URI is not a valid URI.  This can be used to
+      express 'URI absent' where required.
+
+      In the value set and its semantics, this type is equivalent
+      to the Uri SMIv2 textual convention defined in RFC 5017.";
+    reference
+     "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax
+      RFC 3305: Report from the Joint W3C/IETF URI Planning Interest
+                Group: Uniform Resource Identifiers (URIs), URLs,
+                and Uniform Resource Names (URNs): Clarifications
+                and Recommendations
+      RFC 5017: MIB Textual Conventions for Uniform Resource
+                Identifiers (URIs)";
+  }
+
+}
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-yang-types@2013-07-15.yang b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-yang-types@2013-07-15.yang
new file mode 100644
index 0000000000000000000000000000000000000000..ee58fa3ab0042120d5607b8713d21fa0ba845895
--- /dev/null
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_hardware/yang/ietf-yang-types@2013-07-15.yang
@@ -0,0 +1,474 @@
+module ietf-yang-types {
+
+  namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types";
+  prefix "yang";
+
+  organization
+   "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+  contact
+   "WG Web:   <http://tools.ietf.org/wg/netmod/>
+    WG List:  <mailto:netmod@ietf.org>
+
+    WG Chair: David Kessens
+              <mailto:david.kessens@nsn.com>
+
+    WG Chair: Juergen Schoenwaelder
+              <mailto:j.schoenwaelder@jacobs-university.de>
+
+    Editor:   Juergen Schoenwaelder
+              <mailto:j.schoenwaelder@jacobs-university.de>";
+
+  description
+   "This module contains a collection of generally useful derived
+    YANG data types.
+
+    Copyright (c) 2013 IETF Trust and the persons identified as
+    authors of the code.  All rights reserved.
+
+    Redistribution and use in source and binary forms, with or
+    without modification, is permitted pursuant to, and subject
+    to the license terms contained in, the Simplified BSD License
+    set forth in Section 4.c of the IETF Trust's Legal Provisions
+    Relating to IETF Documents
+    (http://trustee.ietf.org/license-info).
+
+    This version of this YANG module is part of RFC 6991; see
+    the RFC itself for full legal notices.";
+
+  revision 2013-07-15 {
+    description
+     "This revision adds the following new data types:
+      - yang-identifier
+      - hex-string
+      - uuid
+      - dotted-quad";
+    reference
+     "RFC 6991: Common YANG Data Types";
+  }
+
+  revision 2010-09-24 {
+    description
+     "Initial revision.";
+    reference
+     "RFC 6021: Common YANG Data Types";
+  }
+
+  /*** collection of counter and gauge types ***/
+
+  typedef counter32 {
+    type uint32;
+    description
+     "The counter32 type represents a non-negative integer
+      that monotonically increases until it reaches a
+      maximum value of 2^32-1 (4294967295 decimal), when it
+      wraps around and starts increasing again from zero.
+
+      Counters have no defined 'initial' value, and thus, a
+      single value of a counter has (in general) no information
+      content.  Discontinuities in the monotonically increasing
+      value normally occur at re-initialization of the
+      management system, and at other times as specified in the
+      description of a schema node using this type.  If such
+      other times can occur, for example, the creation of
+      a schema node of type counter32 at times other than
+      re-initialization, then a corresponding schema node
+      should be defined, with an appropriate type, to indicate
+      the last discontinuity.
+
+      The counter32 type should not be used for configuration
+      schema nodes.  A default statement SHOULD NOT be used in
+      combination with the type counter32.
+
+      In the value set and its semantics, this type is equivalent
+      to the Counter32 type of the SMIv2.";
+    reference
+     "RFC 2578: Structure of Management Information Version 2
+                (SMIv2)";
+  }
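+
+  /*
+   * Editor's note (illustrative, not part of RFC 6991): a wrap-safe
+   * delta between two counter32 samples, assuming at most one wrap
+   * between polls:
+   *
+   *   def counter32_delta(old, new):
+   *       # modulo 2^32 absorbs a single wrap-around
+   *       return (new - old) % (2 ** 32)
+   *
+   *   counter32_delta(4294967290, 10)   # -> 16
+   */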
+
+  typedef zero-based-counter32 {
+    type yang:counter32;
+    default "0";
+    description
+     "The zero-based-counter32 type represents a counter32
+      that has the defined 'initial' value zero.
+
+      A schema node of this type will be set to zero (0) on creation
+      and will thereafter increase monotonically until it reaches
+      a maximum value of 2^32-1 (4294967295 decimal), when it
+      wraps around and starts increasing again from zero.
+
+      Provided that an application discovers a new schema node
+      of this type within the minimum time to wrap, it can use the
+      'initial' value as a delta.  It is important for a management
+      station to be aware of this minimum time and the actual time
+      between polls, and to discard data if the actual time is too
+      long or there is no defined minimum time.
+
+      In the value set and its semantics, this type is equivalent
+      to the ZeroBasedCounter32 textual convention of the SMIv2.";
+    reference
+      "RFC 4502: Remote Network Monitoring Management Information
+                 Base Version 2";
+  }
+
+  typedef counter64 {
+    type uint64;
+    description
+     "The counter64 type represents a non-negative integer
+      that monotonically increases until it reaches a
+      maximum value of 2^64-1 (18446744073709551615 decimal),
+      when it wraps around and starts increasing again from zero.
+
+      Counters have no defined 'initial' value, and thus, a
+      single value of a counter has (in general) no information
+      content.  Discontinuities in the monotonically increasing
+      value normally occur at re-initialization of the
+      management system, and at other times as specified in the
+      description of a schema node using this type.  If such
+      other times can occur, for example, the creation of
+      a schema node of type counter64 at times other than
+      re-initialization, then a corresponding schema node
+      should be defined, with an appropriate type, to indicate
+      the last discontinuity.
+
+      The counter64 type should not be used for configuration
+      schema nodes.  A default statement SHOULD NOT be used in
+      combination with the type counter64.
+
+      In the value set and its semantics, this type is equivalent
+      to the Counter64 type of the SMIv2.";
+    reference
+     "RFC 2578: Structure of Management Information Version 2
+                (SMIv2)";
+  }
+
+  typedef zero-based-counter64 {
+    type yang:counter64;
+    default "0";
+    description
+     "The zero-based-counter64 type represents a counter64 that
+      has the defined 'initial' value zero.
+
+      A schema node of this type will be set to zero (0) on creation
+      and will thereafter increase monotonically until it reaches
+      a maximum value of 2^64-1 (18446744073709551615 decimal),
+      when it wraps around and starts increasing again from zero.
+
+      Provided that an application discovers a new schema node
+      of this type within the minimum time to wrap, it can use the
+      'initial' value as a delta.  It is important for a management
+      station to be aware of this minimum time and the actual time
+      between polls, and to discard data if the actual time is too
+      long or there is no defined minimum time.
+
+      In the value set and its semantics, this type is equivalent
+      to the ZeroBasedCounter64 textual convention of the SMIv2.";
+    reference
+     "RFC 2856: Textual Conventions for Additional High Capacity
+                Data Types";
+  }
+
+  typedef gauge32 {
+    type uint32;
+    description
+     "The gauge32 type represents a non-negative integer, which
+      may increase or decrease, but shall never exceed a maximum
+      value, nor fall below a minimum value.  The maximum value
+      cannot be greater than 2^32-1 (4294967295 decimal), and
+      the minimum value cannot be smaller than 0.  The value of
+      a gauge32 has its maximum value whenever the information
+      being modeled is greater than or equal to its maximum
+      value, and has its minimum value whenever the information
+      being modeled is smaller than or equal to its minimum value.
+      If the information being modeled subsequently decreases
+      below (increases above) the maximum (minimum) value, the
+      gauge32 also decreases (increases).
+
+      In the value set and its semantics, this type is equivalent
+      to the Gauge32 type of the SMIv2.";
+    reference
+     "RFC 2578: Structure of Management Information Version 2
+                (SMIv2)";
+  }
+
+  typedef gauge64 {
+    type uint64;
+    description
+     "The gauge64 type represents a non-negative integer, which
+      may increase or decrease, but shall never exceed a maximum
+      value, nor fall below a minimum value.  The maximum value
+      cannot be greater than 2^64-1 (18446744073709551615), and
+      the minimum value cannot be smaller than 0.  The value of
+      a gauge64 has its maximum value whenever the information
+      being modeled is greater than or equal to its maximum
+      value, and has its minimum value whenever the information
+      being modeled is smaller than or equal to its minimum value.
+      If the information being modeled subsequently decreases
+      below (increases above) the maximum (minimum) value, the
+      gauge64 also decreases (increases).
+
+      In the value set and its semantics, this type is equivalent
+      to the CounterBasedGauge64 SMIv2 textual convention defined
+      in RFC 2856.";
+    reference
+     "RFC 2856: Textual Conventions for Additional High Capacity
+                Data Types";
+  }
+
+  /*** collection of identifier-related types ***/
+
+  typedef object-identifier {
+    type string {
+      pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))'
+            + '(\.(0|([1-9]\d*)))*';
+    }
+    description
+     "The object-identifier type represents administratively
+      assigned names in a registration-hierarchical-name tree.
+
+      Values of this type are denoted as a sequence of numerical
+      non-negative sub-identifier values.  Each sub-identifier
+      value MUST NOT exceed 2^32-1 (4294967295).  Sub-identifiers
+      are separated by single dots and without any intermediate
+      whitespace.
+
+      The ASN.1 standard restricts the value space of the first
+      sub-identifier to 0, 1, or 2.  Furthermore, the value space
+      of the second sub-identifier is restricted to the range
+      0 to 39 if the first sub-identifier is 0 or 1.  Finally,
+      the ASN.1 standard requires that an object identifier
+      always has at least two sub-identifiers.  The pattern
+      captures these restrictions.
+
+      Although the number of sub-identifiers is not limited,
+      module designers should realize that there may be
+      implementations that stick with the SMIv2 limit of 128
+      sub-identifiers.
+
+      This type is a superset of the SMIv2 OBJECT IDENTIFIER type
+      since it is not restricted to 128 sub-identifiers.  Hence,
+      this type SHOULD NOT be used to represent the SMIv2 OBJECT
+      IDENTIFIER type; the object-identifier-128 type SHOULD be
+      used instead.";
+    reference
+     "ISO9834-1: Information technology -- Open Systems
+      Interconnection -- Procedures for the operation of OSI
+      Registration Authorities: General procedures and top
+      arcs of the ASN.1 Object Identifier tree";
+  }
+
+  typedef object-identifier-128 {
+    type object-identifier {
+      pattern '\d*(\.\d*){1,127}';
+    }
+    description
+     "This type represents object-identifiers restricted to 128
+      sub-identifiers.
+
+      In the value set and its semantics, this type is equivalent
+      to the OBJECT IDENTIFIER type of the SMIv2.";
+    reference
+     "RFC 2578: Structure of Management Information Version 2
+                (SMIv2)";
+  }
+
+  typedef yang-identifier {
+    type string {
+      length "1..max";
+      pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*';
+      pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*';
+    }
+    description
+      "A YANG identifier string as defined by the 'identifier'
+       rule in Section 12 of RFC 6020.  An identifier must
+       start with an alphabetic character or an underscore
+       followed by an arbitrary sequence of alphabetic or
+       numeric characters, underscores, hyphens, or dots.
+
+       A YANG identifier MUST NOT start with any possible
+       combination of the lowercase or uppercase character
+       sequence 'xml'.";
+    reference
+      "RFC 6020: YANG - A Data Modeling Language for the Network
+                 Configuration Protocol (NETCONF)";
+  }
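+
+  /*
+   * Editor's note (illustrative, not part of RFC 6991): the second
+   * pattern above is a positive-form way of saying "does not start
+   * with 'xml' in any case".  An equivalent Python check:
+   *
+   *   import re
+   *
+   *   def is_yang_identifier(s):
+   *       return (re.fullmatch(r'[a-zA-Z_][a-zA-Z0-9\-_.]*', s)
+   *               is not None
+   *               and not s.lower().startswith('xml'))
+   */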
+
+  /*** collection of types related to date and time ***/
+
+  typedef date-and-time {
+    type string {
+      pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?'
+            + '(Z|[\+\-]\d{2}:\d{2})';
+    }
+    description
+     "The date-and-time type is a profile of the ISO 8601
+      standard for representation of dates and times using the
+      Gregorian calendar.  The profile is defined by the
+      date-time production in Section 5.6 of RFC 3339.
+
+      The date-and-time type is compatible with the dateTime XML
+      schema type with the following notable exceptions:
+
+      (a) The date-and-time type does not allow negative years.
+
+      (b) The date-and-time time-offset -00:00 indicates an unknown
+          time zone (see RFC 3339) while -00:00 and +00:00 and Z
+          all represent the same time zone in dateTime.
+
+      (c) The canonical format (see below) of date-and-time values
+          differs from the canonical format used by the dateTime XML
+          schema type, which requires all times to be in UTC using
+          the time-offset 'Z'.
+
+      This type is not equivalent to the DateAndTime textual
+      convention of the SMIv2 since RFC 3339 uses a different
+      separator between full-date and full-time and provides
+      higher resolution of time-secfrac.
+
+      The canonical format for date-and-time values with a known time
+      zone uses a numeric time zone offset that is calculated using
+      the device's configured known offset to UTC time.  A change of
+      the device's offset to UTC time will cause date-and-time values
+      to change accordingly.  Such changes might happen periodically
+      in case a server automatically follows daylight saving time
+      (DST) time zone offset changes.  The canonical format for
+      date-and-time values with an unknown time zone (usually
+      referring to the notion of local time) uses the time-offset
+      -00:00.";
+    reference
+     "RFC 3339: Date and Time on the Internet: Timestamps
+      RFC 2579: Textual Conventions for SMIv2
+      XSD-TYPES: XML Schema Part 2: Datatypes Second Edition";
+  }
+
+  typedef timeticks {
+    type uint32;
+    description
+     "The timeticks type represents a non-negative integer that
+      represents the time, modulo 2^32 (4294967296 decimal), in
+      hundredths of a second between two epochs.  When a schema
+      node is defined that uses this type, the description of
+      the schema node identifies both of the reference epochs.
+
+      In the value set and its semantics, this type is equivalent
+      to the TimeTicks type of the SMIv2.";
+    reference
+     "RFC 2578: Structure of Management Information Version 2
+                (SMIv2)";
+  }
+
+  typedef timestamp {
+    type yang:timeticks;
+    description
+     "The timestamp type represents the value of an associated
+      timeticks schema node at which a specific occurrence
+      happened.  The specific occurrence must be defined in the
+      description of any schema node defined using this type.  When
+      the specific occurrence occurred prior to the last time the
+      associated timeticks attribute was zero, then the timestamp
+      value is zero.  Note that this requires all timestamp values
+      to be reset to zero when the value of the associated timeticks
+      attribute reaches 497+ days and wraps around to zero.
+
+      The associated timeticks schema node must be specified
+      in the description of any schema node using this type.
+
+      In the value set and its semantics, this type is equivalent
+      to the TimeStamp textual convention of the SMIv2.";
+    reference
+     "RFC 2579: Textual Conventions for SMIv2";
+  }
+
+  /*** collection of generic address types ***/
+
+  typedef phys-address {
+    type string {
+      pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?';
+    }
+
+    description
+     "Represents media- or physical-level addresses represented
+      as a sequence of octets, each octet represented by two hexadecimal
+      numbers.  Octets are separated by colons.  The canonical
+      representation uses lowercase characters.
+
+      In the value set and its semantics, this type is equivalent
+      to the PhysAddress textual convention of the SMIv2.";
+    reference
+     "RFC 2579: Textual Conventions for SMIv2";
+  }
+
+  typedef mac-address {
+    type string {
+      pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}';
+    }
+    description
+     "The mac-address type represents an IEEE 802 MAC address.
+      The canonical representation uses lowercase characters.
+
+      In the value set and its semantics, this type is equivalent
+      to the MacAddress textual convention of the SMIv2.";
+    reference
+     "IEEE 802: IEEE Standard for Local and Metropolitan Area
+                Networks: Overview and Architecture
+      RFC 2579: Textual Conventions for SMIv2";
+  }
+
+  /*** collection of XML-specific types ***/
+
+  typedef xpath1.0 {
+    type string;
+    description
+     "This type represents an XPATH 1.0 expression.
+
+      When a schema node is defined that uses this type, the
+      description of the schema node MUST specify the XPath
+      context in which the XPath expression is evaluated.";
+    reference
+     "XPATH: XML Path Language (XPath) Version 1.0";
+  }
+
+  /*** collection of string types ***/
+
+  typedef hex-string {
+    type string {
+      pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?';
+    }
+    description
+     "A hexadecimal string with octets represented as hex digits
+      separated by colons.  The canonical representation uses
+      lowercase characters.";
+  }
+
+  typedef uuid {
+    type string {
+      pattern '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-'
+            + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12}';
+    }
+    description
+     "A Universally Unique IDentifier in the string representation
+      defined in RFC 4122.  The canonical representation uses
+      lowercase characters.
+
+      The following is an example of a UUID in string representation:
+      f81d4fae-7dec-11d0-a765-00a0c91e6bf6
+      ";
+    reference
+     "RFC 4122: A Universally Unique IDentifier (UUID) URN
+                Namespace";
+  }
+
+  typedef dotted-quad {
+    type string {
+      pattern
+        '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+      + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])';
+    }
+    description
+      "An unsigned 32-bit number expressed in the dotted-quad
+       notation, i.e., four octets written as decimal numbers
+       and separated with the '.' (full stop) character.";
+  }
+}
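
For reference, these typedefs are enforced purely by their patterns, which YANG matches against the whole value. A minimal Python sketch (illustrative only, standard library only) mirrors this implicit anchoring with re.fullmatch(), using the date-and-time, object-identifier, and uuid patterns copied verbatim from the module above:

    import re

    # Patterns copied from the typedefs above; YANG patterns implicitly match
    # the whole value, which re.fullmatch() reproduces in Python.
    DATE_AND_TIME = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|[\+\-]\d{2}:\d{2})')
    OBJECT_ID     = re.compile(r'(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))(\.(0|([1-9]\d*)))*')
    UUID_RE       = re.compile(r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}')

    assert DATE_AND_TIME.fullmatch('2024-07-11T10:00:00.000000Z')     # timestamp reused in test_yang_acl.py below
    assert not DATE_AND_TIME.fullmatch('2024-07-11 10:00:00Z')        # the RFC 3339 'T' separator is mandatory
    assert OBJECT_ID.fullmatch('1.3.6.1.2.1')                         # classic SNMP mib-2 OID
    assert not OBJECT_ID.fullmatch('0.40')                            # second arc limited to 0..39 under 0 or 1
    assert UUID_RE.fullmatch('f81d4fae-7dec-11d0-a765-00a0c91e6bf6')  # example given in the uuid typedef
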
diff --git a/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py b/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py
index ce60bdea3a7ab08b8dc24dd2e7c2efe4ecf81ae0..f360e318127706b4b4c8fdc4130dfdfc0ba711c0 100644
--- a/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py
+++ b/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py
@@ -15,33 +15,71 @@
 import json
 from flask.json import jsonify
 from flask_restful import Resource, request
+from werkzeug.exceptions import BadRequest
 from common.proto.context_pb2 import Empty
 from common.tools.grpc.Tools import grpc_message_to_json
 from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
+from slice.client.SliceClient import SliceClient
 from .Tools import (
-    format_grpc_to_json, grpc_connection_id, grpc_context_id, grpc_device_id, grpc_link_id, grpc_policy_rule_id,
-    grpc_service_id, grpc_service, grpc_slice_id, grpc_topology_id)
+    format_grpc_to_json, grpc_connection_id, grpc_context, grpc_context_id, grpc_device,
+    grpc_device_id, grpc_link, grpc_link_id, grpc_policy_rule_id,
+    grpc_service_id, grpc_service, grpc_slice, grpc_slice_id, grpc_topology, grpc_topology_id
+)
 
 class _Resource(Resource):
     def __init__(self) -> None:
         super().__init__()
-        self.client = ContextClient()
+        self.context_client = ContextClient()
+        self.device_client  = DeviceClient()
         self.service_client = ServiceClient()
+        self.slice_client   = SliceClient()
 
 class ContextIds(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListContextIds(Empty()))
+        return format_grpc_to_json(self.context_client.ListContextIds(Empty()))
 
 class Contexts(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListContexts(Empty()))
+        return format_grpc_to_json(self.context_client.ListContexts(Empty()))
+
+    def post(self):
+        json_requests = request.get_json()
+        if 'contexts' in json_requests:
+            json_requests = json_requests['contexts']
+        return [
+            format_grpc_to_json(self.context_client.SetContext(grpc_context(context)))
+            for context in json_requests
+        ]
+
+class Context(_Resource):
+    def get(self, context_uuid : str):
+        return format_grpc_to_json(self.context_client.GetContext(grpc_context_id(context_uuid)))
+
+    def put(self, context_uuid : str):
+        context = request.get_json()
+        if context_uuid != context['context_id']['context_uuid']['uuid']:
+            raise BadRequest('Mismatching context_uuid')
+        return format_grpc_to_json(self.context_client.SetContext(grpc_context(context)))
+
+    def delete(self, context_uuid : str):
+        return format_grpc_to_json(self.context_client.RemoveContext(grpc_context_id(context_uuid)))
 
 class DummyContexts(_Resource):
     def get(self):
-        contexts = grpc_message_to_json(self.client.ListContexts(Empty()), use_integers_for_enums=True)['contexts']
-        devices = grpc_message_to_json(self.client.ListDevices(Empty()), use_integers_for_enums=True)['devices']
-        links = grpc_message_to_json(self.client.ListLinks(Empty()), use_integers_for_enums=True)['links']
+        contexts = grpc_message_to_json(
+            self.context_client.ListContexts(Empty()),
+            use_integers_for_enums=True
+        )['contexts']
+        devices = grpc_message_to_json(
+            self.context_client.ListDevices(Empty()),
+            use_integers_for_enums=True
+        )['devices']
+        links = grpc_message_to_json(
+            self.context_client.ListLinks(Empty()),
+            use_integers_for_enums=True
+        )['links']
 
         topologies  = list()
         slices      = list()
@@ -53,17 +91,17 @@ class DummyContexts(_Resource):
             context_id = grpc_context_id(context_uuid)
 
             topologies.extend(grpc_message_to_json(
-                self.client.ListTopologies(context_id),
+                self.context_client.ListTopologies(context_id),
                 use_integers_for_enums=True
             )['topologies'])
 
             slices.extend(grpc_message_to_json(
-                self.client.ListSlices(context_id),
+                self.context_client.ListSlices(context_id),
                 use_integers_for_enums=True
             )['slices'])
 
             context_services = grpc_message_to_json(
-                self.client.ListServices(context_id),
+                self.context_client.ListServices(context_id),
                 use_integers_for_enums=True
             )['services']
             services.extend(context_services)
@@ -72,7 +110,7 @@ class DummyContexts(_Resource):
                 service_uuid = service['service_id']['service_uuid']['uuid']
                 service_id = grpc_service_id(context_uuid, service_uuid)
                 connections.extend(grpc_message_to_json(
-                    self.client.ListConnections(service_id),
+                    self.context_client.ListConnections(service_id),
                     use_integers_for_enums=True
                 )['connections'])
 
@@ -97,115 +135,191 @@ class DummyContexts(_Resource):
         if len(connections) > 0: dummy_context['connections'] = connections
         return jsonify(dummy_context)
 
-class Context(_Resource):
-    def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.GetContext(grpc_context_id(context_uuid)))
-
 class TopologyIds(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListTopologyIds(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListTopologyIds(grpc_context_id(context_uuid)))
 
 class Topologies(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListTopologies(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListTopologies(grpc_context_id(context_uuid)))
+
+    def post(self, context_uuid : str):
+        json_requests = request.get_json()
+        if 'topologies' in json_requests:
+            json_requests = json_requests['topologies']
+        for topology in json_requests:
+            if context_uuid != topology['topology_id']['context_id']['context_uuid']['uuid']:
+                raise BadRequest('Mismatching context_uuid')
+        return [
+            format_grpc_to_json(self.context_client.SetTopology(grpc_topology(topology)))
+            for topology in json_requests
+        ]
 
 class Topology(_Resource):
     def get(self, context_uuid : str, topology_uuid : str):
-        return format_grpc_to_json(self.client.GetTopology(grpc_topology_id(context_uuid, topology_uuid)))
+        return format_grpc_to_json(self.context_client.GetTopology(grpc_topology_id(context_uuid, topology_uuid)))
+
+    def put(self, context_uuid : str, topology_uuid : str):
+        topology = request.get_json()
+        if context_uuid != topology['topology_id']['context_id']['context_uuid']['uuid']:
+            raise BadRequest('Mismatching context_uuid')
+        if topology_uuid != topology['topology_id']['topology_uuid']['uuid']:
+            raise BadRequest('Mismatching topology_uuid')
+        return format_grpc_to_json(self.context_client.SetTopology(grpc_topology(topology)))
+
+    def delete(self, context_uuid : str, topology_uuid : str):
+        return format_grpc_to_json(self.context_client.RemoveTopology(grpc_topology_id(context_uuid, topology_uuid)))
 
 class ServiceIds(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListServiceIds(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListServiceIds(grpc_context_id(context_uuid)))
 
 class Services(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListServices(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListServices(grpc_context_id(context_uuid)))
+
+    def post(self, context_uuid : str):
+        json_requests = request.get_json()
+        if 'services' in json_requests:
+            json_requests = json_requests['services']
+        for service in json_requests:
+            if context_uuid != service['service_id']['context_id']['context_uuid']['uuid']:
+                raise BadRequest('Mismatching context_uuid')
+        return [
+            format_grpc_to_json(self.service_client.CreateService(grpc_service(service)))
+            for service in json_requests
+        ]
 
 class Service(_Resource):
     def get(self, context_uuid : str, service_uuid : str):
-        return format_grpc_to_json(self.client.GetService(grpc_service_id(context_uuid, service_uuid)))
-
-    def post(self, context_uuid : str, service_uuid : str): # pylint: disable=unused-argument
-        service = request.get_json()['services'][0]
-        return format_grpc_to_json(self.service_client.CreateService(grpc_service(
-            service_uuid = service['service_id']['service_uuid']['uuid'],
-            service_type = service['service_type'],
-            context_uuid = service['service_id']['context_id']['context_uuid']['uuid'],
-        )))
-
-    def put(self, context_uuid : str, service_uuid : str):  # pylint: disable=unused-argument
-        service = request.get_json()['services'][0]
-        return format_grpc_to_json(self.service_client.UpdateService(grpc_service(
-            service_uuid = service['service_id']['service_uuid']['uuid'],
-            service_type = service['service_type'],
-            context_uuid = service['service_id']['context_id']['context_uuid']['uuid'],
-            status       = service['service_status']['service_status'],
-            endpoint_ids = service['service_endpoint_ids'],
-            constraints  = service['service_constraints'],
-            config_rules = service['service_config']['config_rules']
-        )))
+        return format_grpc_to_json(self.context_client.GetService(grpc_service_id(context_uuid, service_uuid)))
+
+    def put(self, context_uuid : str, service_uuid : str):
+        service = request.get_json()
+        if context_uuid != service['service_id']['context_id']['context_uuid']['uuid']:
+            raise BadRequest('Mismatching context_uuid')
+        if service_uuid != service['service_id']['service_uuid']['uuid']:
+            raise BadRequest('Mismatching service_uuid')
+        return format_grpc_to_json(self.service_client.UpdateService(grpc_service(service)))
 
     def delete(self, context_uuid : str, service_uuid : str):
-        return format_grpc_to_json(self.service_client.DeleteService(grpc_service_id(
-            context_uuid, service_uuid,
-        )))
+        return format_grpc_to_json(self.service_client.DeleteService(grpc_service_id(context_uuid, service_uuid)))
 
 class SliceIds(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListSliceIds(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListSliceIds(grpc_context_id(context_uuid)))
 
 class Slices(_Resource):
     def get(self, context_uuid : str):
-        return format_grpc_to_json(self.client.ListSlices(grpc_context_id(context_uuid)))
+        return format_grpc_to_json(self.context_client.ListSlices(grpc_context_id(context_uuid)))
+
+    def post(self, context_uuid : str):
+        json_requests = request.get_json()
+        if 'slices' in json_requests:
+            json_requests = json_requests['slices']
+        for slice_ in json_requests:
+            if context_uuid != slice_['slice_id']['context_id']['context_uuid']['uuid']:
+                raise BadRequest('Mismatching context_uuid')
+        return [
+            format_grpc_to_json(self.slice_client.CreateSlice(grpc_slice(slice_)))
+            for slice_ in json_requests
+        ]
 
 class Slice(_Resource):
     def get(self, context_uuid : str, slice_uuid : str):
-        return format_grpc_to_json(self.client.GetSlice(grpc_slice_id(context_uuid, slice_uuid)))
+        return format_grpc_to_json(self.context_client.GetSlice(grpc_slice_id(context_uuid, slice_uuid)))
+
+    def put(self, context_uuid : str, slice_uuid : str):
+        slice_ = request.get_json()
+        if context_uuid != slice_['slice_id']['context_id']['context_uuid']['uuid']:
+            raise BadRequest('Mismatching context_uuid')
+        if slice_uuid != slice_['slice_id']['slice_uuid']['uuid']:
+            raise BadRequest('Mismatching slice_uuid')
+        return format_grpc_to_json(self.slice_client.UpdateSlice(grpc_slice(slice_)))
+
+    def delete(self, context_uuid : str, slice_uuid : str):
+        return format_grpc_to_json(self.slice_client.DeleteSlice(grpc_slice_id(context_uuid, slice_uuid)))
 
 class DeviceIds(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListDeviceIds(Empty()))
+        return format_grpc_to_json(self.context_client.ListDeviceIds(Empty()))
 
 class Devices(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListDevices(Empty()))
+        return format_grpc_to_json(self.context_client.ListDevices(Empty()))
+
+    def post(self):
+        json_requests = request.get_json()
+        if 'devices' in json_requests:
+            json_requests = json_requests['devices']
+        return [
+            format_grpc_to_json(self.device_client.AddDevice(grpc_device(device)))
+            for device in json_requests
+        ]
 
 class Device(_Resource):
     def get(self, device_uuid : str):
-        return format_grpc_to_json(self.client.GetDevice(grpc_device_id(device_uuid)))
+        return format_grpc_to_json(self.context_client.GetDevice(grpc_device_id(device_uuid)))
+
+    def put(self, device_uuid : str):
+        device = request.get_json()
+        if device_uuid != device['device_id']['device_uuid']['uuid']:
+            raise BadRequest('Mismatching device_uuid')
+        return format_grpc_to_json(self.device_client.ConfigureDevice(grpc_device(device)))
+
+    def delete(self, device_uuid : str):
+        return format_grpc_to_json(self.device_client.DeleteDevice(grpc_device_id(device_uuid)))
 
 class LinkIds(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListLinkIds(Empty()))
+        return format_grpc_to_json(self.context_client.ListLinkIds(Empty()))
 
 class Links(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListLinks(Empty()))
+        return format_grpc_to_json(self.context_client.ListLinks(Empty()))
+
+    def post(self):
+        json_requests = request.get_json()
+        if 'links' in json_requests:
+            json_requests = json_requests['links']
+        return [
+            format_grpc_to_json(self.context_client.SetLink(grpc_link(link)))
+            for link in json_requests
+        ]
 
 class Link(_Resource):
     def get(self, link_uuid : str):
-        return format_grpc_to_json(self.client.GetLink(grpc_link_id(link_uuid)))
+        return format_grpc_to_json(self.context_client.GetLink(grpc_link_id(link_uuid)))
+
+    def put(self, link_uuid : str):
+        link = request.get_json()
+        if link_uuid != link['link_id']['link_uuid']['uuid']:
+            raise BadRequest('Mismatching link_uuid')
+        return format_grpc_to_json(self.context_client.SetLink(grpc_link(link)))
+
+    def delete(self, link_uuid : str):
+        return format_grpc_to_json(self.context_client.RemoveLink(grpc_link_id(link_uuid)))
 
 class ConnectionIds(_Resource):
     def get(self, context_uuid : str, service_uuid : str):
-        return format_grpc_to_json(self.client.ListConnectionIds(grpc_service_id(context_uuid, service_uuid)))
+        return format_grpc_to_json(self.context_client.ListConnectionIds(grpc_service_id(context_uuid, service_uuid)))
 
 class Connections(_Resource):
     def get(self, context_uuid : str, service_uuid : str):
-        return format_grpc_to_json(self.client.ListConnections(grpc_service_id(context_uuid, service_uuid)))
+        return format_grpc_to_json(self.context_client.ListConnections(grpc_service_id(context_uuid, service_uuid)))
 
 class Connection(_Resource):
     def get(self, connection_uuid : str):
-        return format_grpc_to_json(self.client.GetConnection(grpc_connection_id(connection_uuid)))
+        return format_grpc_to_json(self.context_client.GetConnection(grpc_connection_id(connection_uuid)))
 
 class PolicyRuleIds(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListPolicyRuleIds(Empty()))
+        return format_grpc_to_json(self.context_client.ListPolicyRuleIds(Empty()))
 
 class PolicyRules(_Resource):
     def get(self):
-        return format_grpc_to_json(self.client.ListPolicyRules(Empty()))
+        return format_grpc_to_json(self.context_client.ListPolicyRules(Empty()))
 
 class PolicyRule(_Resource):
     def get(self, policy_rule_uuid : str):
-        return format_grpc_to_json(self.client.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid)))
+        return format_grpc_to_json(self.context_client.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid)))
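
The collection POST handlers added above (Contexts, Topologies, Services, Slices, Devices, Links) accept either a bare JSON list or the same list wrapped under its collection key ('contexts', 'topologies', ...), and the per-object PUT handlers cross-check the UUIDs in the URL against the body, raising BadRequest (HTTP 400) on mismatch. A minimal client-side sketch; the '/tfs-api' prefix and the exact resource paths are assumptions here, since route registration lives in the plugin's __init__, not in this patch:

    import requests

    context = {'context_id': {'context_uuid': {'uuid': 'admin'}}}

    # Both payload shapes reach Contexts.post(); the wrapped form is unpacked
    # by the "if 'contexts' in json_requests" check before iteration.
    requests.post('http://localhost:80/tfs-api/contexts', json=[context])
    requests.post('http://localhost:80/tfs-api/contexts', json={'contexts': [context]})

    # PUT validates that the UUID embedded in the body matches the URL.
    requests.put('http://localhost:80/tfs-api/context/admin', json=context)
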
diff --git a/src/nbi/service/rest_server/nbi_plugins/tfs_api/Tools.py b/src/nbi/service/rest_server/nbi_plugins/tfs_api/Tools.py
index 1f69ffffb8c97a83591ec626920b57f40d032783..bb10ee375f0ecdf7b63459b300dd0ff0fed40615 100644
--- a/src/nbi/service/rest_server/nbi_plugins/tfs_api/Tools.py
+++ b/src/nbi/service/rest_server/nbi_plugins/tfs_api/Tools.py
@@ -12,21 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import Dict
 from flask.json import jsonify
 from common.proto.context_pb2 import (
-    ConnectionId, ContextId, DeviceId, LinkId, ServiceId, SliceId, TopologyId, Service, ServiceStatusEnum
+    ConnectionId, Context, ContextId, Device, DeviceId, Link, LinkId,
+    ServiceId, Slice, SliceId, Topology, TopologyId, Service
 )
-from common.proto.policy_pb2 import PolicyRuleId
+from common.proto.policy_pb2 import PolicyRule, PolicyRuleId
 from common.tools.grpc.Tools import grpc_message_to_json
 from common.tools.object_factory.Connection import json_connection_id
 from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.ConfigRule import json_config_rule
-from common.tools.object_factory.Constraint import json_constraint_custom
-from common.tools.object_factory.EndPoint import json_endpoint_id
 from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.Link import json_link_id
 from common.tools.object_factory.PolicyRule import json_policyrule_id
-from common.tools.object_factory.Service import json_service_id, json_service
+from common.tools.object_factory.Service import json_service_id
 from common.tools.object_factory.Slice import json_slice_id
 from common.tools.object_factory.Topology import json_topology_id
 
@@ -40,51 +39,41 @@ def grpc_connection_id(connection_uuid):
 def grpc_context_id(context_uuid):
     return ContextId(**json_context_id(context_uuid))
 
+def grpc_context(json_context : Dict):
+    return Context(**json_context)
+
 def grpc_device_id(device_uuid):
     return DeviceId(**json_device_id(device_uuid))
 
+def grpc_device(json_device : Dict):
+    return Device(**json_device)
+
 def grpc_link_id(link_uuid):
     return LinkId(**json_link_id(link_uuid))
 
+def grpc_link(json_link : Dict):
+    return Link(**json_link)
+
 def grpc_service_id(context_uuid, service_uuid):
     return ServiceId(**json_service_id(service_uuid, context_id=json_context_id(context_uuid)))
 
-def grpc_service(
-    service_uuid, service_type, context_uuid, status=None, endpoint_ids=None, constraints=None, config_rules=None
-):
-    json_context = json_context_id(context_uuid)
-    json_status = status if status else ServiceStatusEnum.SERVICESTATUS_PLANNED
-    json_endpoints_ids = [
-        json_endpoint_id(
-            json_device_id(endpoint_id['device_id']['device_uuid']['uuid']),
-            endpoint_id['endpoint_uuid']['uuid']
-        )
-        for endpoint_id in endpoint_ids
-    ] if endpoint_ids else []
-    json_constraints = [
-        json_constraint_custom(
-            constraint['custom']['constraint_type'],
-            constraint['custom']['constraint_value']
-        )
-        for constraint in constraints
-    ] if constraints else []
-    json_config_rules = [
-        json_config_rule(
-            config_rule['action'],
-            config_rule['custom']['resource_key'],
-            config_rule['custom']['resource_value']
-        )
-        for config_rule in config_rules
-    ] if config_rules else []
-    return Service(**json_service(
-        service_uuid, service_type, json_context, json_status,
-        json_endpoints_ids, json_constraints, json_config_rules))
+def grpc_service(json_service : Dict):
+    return Service(**json_service)
 
 def grpc_slice_id(context_uuid, slice_uuid):
     return SliceId(**json_slice_id(slice_uuid, context_id=json_context_id(context_uuid)))
-    
+
+def grpc_slice(json_slice : Dict):
+    return Slice(**json_slice)
+
 def grpc_topology_id(context_uuid, topology_uuid):
     return TopologyId(**json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)))
 
+def grpc_topology(json_topology : Dict):
+    return Topology(**json_topology)
+
 def grpc_policy_rule_id(policy_rule_uuid):
     return PolicyRuleId(**json_policyrule_id(policy_rule_uuid))
+
+def grpc_policy_rule(json_policy_rule : Dict):
+    return PolicyRule(**json_policy_rule)
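
The slimmed-down grpc_* helpers above delegate all validation to the protobuf constructors: Message(**json) expands the top-level keys into fields and accepts plain dicts for nested message fields, so request bodies must already follow the proto schemas. A short illustrative sketch of that behavior:

    from common.proto.context_pb2 import Context

    # Nested dicts are accepted for message-typed fields, so a schema-conformant
    # JSON body expands directly into the protobuf constructor...
    ctx = Context(**{'context_id': {'context_uuid': {'uuid': 'admin'}}})
    assert ctx.context_id.context_uuid.uuid == 'admin'

    # ...while unknown keys raise ValueError instead of being silently dropped,
    # so malformed REST bodies fail loudly rather than being partially applied.
    try:
        Context(**{'no_such_field': 1})
    except ValueError as exc:
        print(exc)
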
diff --git a/src/nbi/tests/data/ietf_acl.json b/src/nbi/tests/data/ietf_acl.json
new file mode 100644
index 0000000000000000000000000000000000000000..072df6d01513db8e47e50ffd42fc6719a6715f77
--- /dev/null
+++ b/src/nbi/tests/data/ietf_acl.json
@@ -0,0 +1,56 @@
+{
+    "ietf-access-control-list": {
+        "acls": {
+            "acl": [
+                {
+                    "name": "sample-ipv4-acl",
+                    "type": "ipv4-acl-type",
+                    "aces": {
+                        "ace": [
+                            {
+                                "name": "rule1",
+                                "matches": {
+                                    "ipv4": {
+                                        "dscp": 18,
+                                        "source-ipv4-network": "128.32.10.6/24",
+                                        "destination-ipv4-network": "172.10.33.0/24"
+                                    },
+                                    "tcp": {
+                                        "flags": "syn",
+                                        "source-port": {
+                                            "port": 1444,
+                                            "operator": "eq"
+                                        },
+                                        "destination-port": {
+                                            "port": 1333,
+                                            "operator": "eq"
+                                        }
+                                    }
+                                },
+                                "actions": {
+                                    "forwarding": "drop"
+                                }
+                            }
+                        ]
+                    }
+                }
+            ],
+            "attachment-points": {
+                "interface": [
+                    {
+                        "interface-id": "200",
+                        "ingress": {
+                            "acl-sets": {
+                                "acl-set": [
+                                    {
+                                        "name": "sample-ipv4-acl"
+                                    }
+                                ]
+                            }
+                        }
+                    }
+                ]
+            }
+        }
+    }
+}
diff --git a/src/nbi/tests/ietf_acl_client.py b/src/nbi/tests/ietf_acl_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..155244a9261ec2a915512cd6e8f9f2df703b7868
--- /dev/null
+++ b/src/nbi/tests/ietf_acl_client.py
@@ -0,0 +1,89 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests, time
+from typing import Optional
+from requests.auth import HTTPBasicAuth
+
+BASE_URL = '{:s}://{:s}:{:d}/restconf/data'
+ACLS_URL = '{:s}/device={:s}/ietf-access-control-list:acls'
+ACL_URL  = '{:s}/device={:s}/ietf-access-control-list:acl={:s}'
+
+CSG1_DEVICE_UUID = '118295c8-318a-52ec-a394-529fc4b70f2f' # router: 128.32.10.1
+ACL_NAME         = 'sample-ipv4-acl'
+ACL_RULE         = {"ietf-access-control-list:acls": {
+    "acl": [{
+        "name": "sample-ipv4-acl", "type": "ipv4-acl-type",
+        "aces": {"ace": [{
+            "name": "rule1",
+            "matches": {
+                "ipv4": {
+                    "source-ipv4-network": "128.32.10.6/24",
+                    "destination-ipv4-network": "172.10.33.0/24",
+                    "dscp": 18
+                },
+                "tcp": {
+                    "source-port": {"operator": "eq", "port": 1444},
+                    "destination-port": {"operator": "eq", "port": 1333},
+                    "flags": "syn"
+                }
+            },
+            "actions": {"forwarding": "drop"}
+        }]}
+    }],
+    "attachment-points": {"interface": [{
+        "interface-id": "200",
+        "ingress": {"acl-sets": {"acl-set": [{"name": "sample-ipv4-acl"}]}}
+    }]
+}}}
+
+class TfsIetfAclClient:
+    def __init__(
+        self, host : str = 'localhost', port : int = 80, schema : str = 'http',
+        username : Optional[str] = 'admin', password : Optional[str] = 'admin',
+        timeout : int = 10, allow_redirects : bool = True, verify : bool = False
+    ) -> None:
+        self._base_url = BASE_URL.format(schema, host, port)
+        auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None
+        self._settings = dict(auth=auth, timeout=timeout, allow_redirects=allow_redirects, verify=verify)
+
+    def post(self, device_uuid : str, ietf_acl_data : dict) -> str:
+        request_url = ACLS_URL.format(self._base_url, device_uuid)
+        reply = requests.post(request_url, json=ietf_acl_data, **(self._settings))
+        return reply.text
+
+    def get(self, device_uuid : str, acl_name : str) -> str:
+        request_url = ACL_URL.format(self._base_url, device_uuid, acl_name)
+        reply = requests.get(request_url, **(self._settings))
+        return reply.text
+
+    def delete(self, device_uuid : str, acl_name : str) -> str:
+        request_url = ACL_URL.format(self._base_url, device_uuid, acl_name)
+        reply = requests.delete(request_url, **(self._settings))
+        return reply.text
+
+def main():
+    client = TfsIetfAclClient()
+    print(f'ACL rule: {ACL_RULE}')
+    post_response = client.post(CSG1_DEVICE_UUID, ACL_RULE)
+    print(f'post response: {post_response}')
+    time.sleep(.5)
+    get_response = client.get(CSG1_DEVICE_UUID, ACL_NAME)
+    print(f'get response: {get_response}')
+    time.sleep(.5)
+    delete_response = client.delete(CSG1_DEVICE_UUID, ACL_NAME)
+    print(f'delete response: {delete_response}')
+
+if __name__ == '__main__':
+    main()
diff --git a/src/nbi/tests/test_yang_acl.py b/src/nbi/tests/test_yang_acl.py
new file mode 100644
index 0000000000000000000000000000000000000000..607001870fa69e79bd7ef53fa92d88bbf353e45e
--- /dev/null
+++ b/src/nbi/tests/test_yang_acl.py
@@ -0,0 +1,104 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, json, libyang, logging, os
+from typing import Dict, List, Optional
+
+LOGGER = logging.getLogger(__name__)
+
+YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang')
+YANG_MODULES = [
+    'ietf-yang-types',
+    'ietf-interfaces',
+    'iana-if-type',
+    'ietf-access-control-list',
+]
+
+class YangValidator:
+    def __init__(self) -> None:
+        self._yang_context = libyang.Context(YANG_DIR)
+        for module_name in YANG_MODULES:
+            LOGGER.info('Loading module: {:s}'.format(str(module_name)))
+            yang_module = self._yang_context.load_module(module_name)
+            yang_module.feature_enable_all()
+            yang_module_prefix = yang_module.prefix()
+            LOGGER.info('  Prefix: {:s}'.format(str(yang_module_prefix)))
+
+    def parse_to_dict(self, message : Dict, interface_names : List[str]) -> Dict:
+        interfaces = self._yang_context.create_data_path('/ietf-interfaces:interfaces')
+        for if_index,interface_name in enumerate(interface_names):
+            if_path = 'interface[name="{:s}"]'.format(str(interface_name))
+            interface = interfaces.create_path(if_path)
+            interface.create_path('if-index', if_index + 1)
+            interface.create_path('type', 'iana-if-type:ethernetCsmacd')
+            interface.create_path('admin-status', 'up')
+            interface.create_path('oper-status', 'up')
+            statistics = interface.create_path('statistics')
+            statistics.create_path('discontinuity-time', '2024-07-11T10:00:00.000000Z')
+
+        message = copy.deepcopy(message)
+        message['ietf-interfaces:interfaces'] = interfaces.print_dict()['interfaces']
+
+        dnode : Optional[libyang.DNode] = self._yang_context.parse_data_mem(
+            json.dumps(message), 'json', validate_present=True, strict=True
+        )
+        if dnode is None: raise Exception('Unable to parse Message({:s})'.format(str(message)))
+        message = dnode.print_dict()
+        dnode.free()
+        interfaces.free()
+        return message
+
+    def destroy(self) -> None:
+        self._yang_context.destroy()
+        self._yang_context = None
+
+def main() -> None:
+    import uuid # pylint: disable=import-outside-toplevel
+    logging.basicConfig(level=logging.DEBUG)
+
+    interface_names = {'200', '500', str(uuid.uuid4()), str(uuid.uuid4())}
+    ACL_RULE = {"ietf-access-control-list:acls": {
+        "acl": [{
+            "name": "sample-ipv4-acl", "type": "ipv4-acl-type",
+            "aces": {"ace": [{
+                "name": "rule1",
+                "matches": {
+                    "ipv4": {
+                        "source-ipv4-network": "128.32.10.6/24",
+                        "destination-ipv4-network": "172.10.33.0/24",
+                        "dscp": 18
+                    },
+                    "tcp": {
+                        "source-port": {"operator": "eq", "port": 1444},
+                        "destination-port": {"operator": "eq", "port": 1333},
+                        "flags": "syn"
+                    }
+                },
+                "actions": {"forwarding": "drop"}
+            }]}
+        }],
+        "attachment-points": {"interface": [{
+            "interface-id": "200",
+            "ingress": {"acl-sets": {"acl-set": [{"name": "sample-ipv4-acl"}]}}
+        }]
+    }}}
+
+    yang_validator = YangValidator()
+    request_data = yang_validator.parse_to_dict(ACL_RULE, list(interface_names))
+    yang_validator.destroy()
+
+    LOGGER.info('request_data = {:s}'.format(str(request_data)))
+
+if __name__ == '__main__':
+    main()
diff --git a/src/opticalattackdetector/requirements.in b/src/opticalattackdetector/requirements.in
index 39982773b7bbd14a680aa3b26173e8fbcecd88fd..e8476e9faebacd73ff570de43f6417f4f32e23a0 100644
--- a/src/opticalattackdetector/requirements.in
+++ b/src/opticalattackdetector/requirements.in
@@ -12,5 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-numpy
+numpy<2.0.0
 redis
diff --git a/src/opticalcontroller/requirements.in b/src/opticalcontroller/requirements.in
index 0b1947bee2c7f1e89491dff4f7589d3465d28c38..4732ee635a60b8320e25cd2c26388d1cfdfd25cc 100644
--- a/src/opticalcontroller/requirements.in
+++ b/src/opticalcontroller/requirements.in
@@ -17,5 +17,5 @@ flask-restplus==0.13.0
 itsdangerous==1.1.0
 Jinja2==2.11.3
 MarkupSafe==1.1.1
-numpy==1.23.0
+numpy<2.0.0
 Werkzeug==0.16.1
diff --git a/src/pathcomp/frontend/requirements.in b/src/pathcomp/frontend/requirements.in
index 0466b25dc1e326d72735c02aa9b581264dd02620..602ecff548366217e24331721bd0bec7afff8e04 100644
--- a/src/pathcomp/frontend/requirements.in
+++ b/src/pathcomp/frontend/requirements.in
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 
+numpy<2.0.0
 pandas==1.5.*
 requests==2.27.1
 scikit-learn==1.1.*
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py
index 0c98254729afd0b2089c84499a9c739e985b27f5..f92f9b2fff11ab585813ab59e07c463f361413d2 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py
@@ -184,6 +184,10 @@ def compose_device_config_rules(
 
                 device_endpoint_keys = set(itertools.product(device_keys, endpoint_keys))
                 if len(device_endpoint_keys.intersection(endpoints_traversed)) == 0: continue
+
+                # TODO: check if vlan needs to be removed from config_rule
+                #config_rule.custom.resource_key = re.sub('\/vlan\[[^\]]+\]', '', config_rule.custom.resource_key)
+
                 subservice_config_rules.append(config_rule)
         else:
             continue
diff --git a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
index 4e9ceabc3add1bae949f751f36e2a6f8cb237fa6..7527932877f47a092a35f286defb3744e05db109 100644
--- a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
+++ b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py
@@ -12,24 +12,35 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, List, Tuple
+from typing import Any, Dict, List, Optional, Tuple
 from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
 from service.service.service_handler_api.AnyTreeTools import TreeNode
 
+def get_value(field_name : str, *containers, default=None) -> Optional[Any]:
+    if len(containers) == 0: raise Exception('No containers specified')
+    for container in containers:
+        if field_name not in container: continue
+        return container[field_name]
+    return default
+
 def setup_config_rules(
     service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
-    service_settings : TreeNode, endpoint_settings : TreeNode, endpoint_acls : List [Tuple]
+    service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode, endpoint_acls : List[Tuple]
 ) -> List[Dict]:
 
     if service_settings  is None: return []
+    if device_settings   is None: return []
     if endpoint_settings is None: return []
 
     json_settings          : Dict = service_settings.value
+    json_device_settings   : Dict = device_settings.value
     json_endpoint_settings : Dict = endpoint_settings.value
 
-    mtu                       = json_settings.get('mtu',                          1450     )  # 1512
+    settings = (json_settings, json_endpoint_settings, json_device_settings)
+
+    mtu                       = get_value('mtu', *settings, default=1450)   # 1512
     #address_families         = json_settings.get('address_families',             []       )  # ['IPV4']
-    bgp_as                    = json_settings.get('bgp_as',                       65000    )  # 65000
+    bgp_as                    = get_value('bgp_as', *settings, default=65000)   # 65000
 
     router_id                 = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
     route_distinguisher       = json_settings.get('route_distinguisher',          '65000:101'    )  # '60001:801'
@@ -76,6 +87,7 @@ def setup_config_rules(
                 'name': network_instance_name, 
                 'protocol_name': 'BGP', 
                 'identifier': 'BGP', 
+                'type': 'L3VRF',
                 'as': bgp_as,
                 'router_id': router_id, 
         }),
@@ -88,7 +100,6 @@ def setup_config_rules(
                 'protocol_name': 'DIRECTLY_CONNECTED',
         }),
 
-        
         #Add STATIC protocol to network instance
         json_config_rule_set(
             '/network_instance[{:s}]/protocols[STATIC]'.format(network_instance_name), {
@@ -114,6 +125,7 @@ def setup_config_rules(
         json_config_rule_set(
             '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
                 'name'        : network_instance_name, 
+                'type'        : 'L3VRF',
                 'id'          : if_subif_name, 
                 'interface'   : if_subif_name,
                 'subinterface': sub_interface_index,
@@ -183,6 +195,7 @@ def setup_config_rules(
         }),
 
     ]
+
     for res_key, res_value in endpoint_acls:
         json_config_rules.append(
                {'action': 1, 'acl': res_value}
@@ -191,23 +204,27 @@ def setup_config_rules(
 
 def teardown_config_rules(
     service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
-    service_settings : TreeNode, endpoint_settings : TreeNode
+    service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode
 ) -> List[Dict]:
 
     if service_settings  is None: return []
+    if device_settings   is None: return []
     if endpoint_settings is None: return []
 
     json_settings          : Dict = service_settings.value
+    json_device_settings   : Dict = device_settings.value
     json_endpoint_settings : Dict = endpoint_settings.value
 
+    settings = (json_settings, json_endpoint_settings, json_device_settings)
+
     service_short_uuid        = service_uuid.split('-')[-1]
     network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
     #network_interface_desc    = '{:s}-NetIf'.format(service_uuid)
     #network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)
 
-    #mtu                 = json_settings.get('mtu',                          1450     )  # 1512
+    #mtu                       = get_value('mtu', *settings, default=1450)   # 1512
     #address_families    = json_settings.get('address_families',             []       )  # ['IPV4']
-    #bgp_as              = json_settings.get('bgp_as',                       65000    )  # 65000
+    #bgp_as                    = get_value('bgp_as', *settings, default=65000)   # 65000
     route_distinguisher = json_settings.get('route_distinguisher',          '0:0'    )  # '60001:801'
     #sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
     #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
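
The settings tuple introduced above fixes the lookup order of get_value(): service-level settings win, then endpoint-level, then device-level, and the default applies only when no container defines the field. A tiny standalone illustration (get_value copied from the patch so the snippet runs on its own):

    from typing import Any, Optional

    def get_value(field_name : str, *containers, default=None) -> Optional[Any]:
        if len(containers) == 0: raise Exception('No containers specified')
        for container in containers:
            if field_name not in container: continue
            return container[field_name]
        return default

    service  = {'mtu': 1500}
    endpoint = {'mtu': 9000, 'vlan_id': 400}
    device   = {'bgp_as': 65001}

    assert get_value('mtu',     service, endpoint, device) == 1500   # first container wins
    assert get_value('vlan_id', service, endpoint, device) == 400    # falls through to the endpoint settings
    assert get_value('bgp_as',  service, endpoint, device) == 65001  # found only in the device settings
    assert get_value('asn',     service, endpoint, device, default=65000) == 65000
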
diff --git a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules_test_ocnos.py b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules_test_ocnos.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fa1d0b5b6931902b4ac50847c90bf67738032ba
--- /dev/null
+++ b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules_test_ocnos.py
@@ -0,0 +1,337 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, List, Tuple
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from service.service.service_handler_api.AnyTreeTools import TreeNode
+
+def setup_config_rules(
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
+    service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode, endpoint_acls : List[Tuple]
+) -> List[Dict]:
+
+    if service_settings  is None: return []
+    if device_settings   is None: return []
+    if endpoint_settings is None: return []
+
+    json_settings          : Dict = service_settings.value
+    json_device_settings   : Dict = device_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
+
+    mtu                       = json_settings.get('mtu',                          1450     )  # 1512
+    #address_families         = json_settings.get('address_families',             []       )  # ['IPV4']
+    bgp_as                    = json_device_settings.get('bgp_as',                       65000    )  # 65000
+
+    router_id                 = json_device_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
+    route_distinguisher       = json_device_settings.get('route_distinguisher',          '65000:101'    )  # '60001:801'
+    sub_interface_index       = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
+    vlan_id                   = json_endpoint_settings.get('vlan_id',             1        )  # 400
+    address_ip                = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
+    address_prefix            = json_endpoint_settings.get('address_prefix',      24       )  # 30
+
+    policy_import             = json_device_settings.get('policy_AZ',            '2'     )  # 2
+    policy_export             = json_device_settings.get('policy_ZA',            '7'     )  # 30
+    #network_interface_desc    = '{:s}-NetIf'.format(service_uuid)
+    network_interface_desc    = json_endpoint_settings.get('ni_description','')
+    #network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)
+    network_subinterface_desc = json_endpoint_settings.get('subif_description','')
+    #service_short_uuid       = service_uuid.split('-')[-1]
+    #network_instance_name    = '{:s}-NetInst'.format(service_short_uuid)
+    network_instance_name     = json_endpoint_settings.get('ni_name',          service_uuid.split('-')[-1])  #ELAN-AC:1
+
+    self_bgp_if_name          = json_device_settings.get('self_bgp_interface_name', '')
+    self_bgp_address_ip       = json_device_settings.get('self_bgp_interface_address_ip', '')
+    bgp_address_prefix        = json_device_settings.get('bgp_interface_address_prefix', '')
+    bgp_sub_interface_index   = json_device_settings.get('self_bgp_sub_interface_index', 0)
+    neighbor_bgp_if_address_ip = json_device_settings.get('neighbor_bgp_interface_address_ip', '0.0.0.0')  # '2.2.2.1'
+
+    # if_subif_name       = '{:s}.{:d}'.format(endpoint_name, 0)
+    if_subif_name       = '{:s}'.format(endpoint_name[5:])  # strip the fixed 5-character prefix from the endpoint name
+
+    json_config_rules = [
+        # Configure Interface (not used)
+        #json_config_rule_set(
+        #    '/interface[{:s}]'.format(endpoint_name), {
+        #        'name': endpoint_name, 
+        #        'description': network_interface_desc, 
+        #        'mtu': mtu,
+        #}),
+
+        #Create network instance
+        json_config_rule_set(
+            '/network_instance[{:s}]'.format(network_instance_name), {
+                'name': network_instance_name, 
+                'description': network_interface_desc, 
+                'type': 'L3VRF',
+                'route_distinguisher': route_distinguisher,
+                'router_id': router_id,
+                #'address_families': address_families,
+        }),
+
+        #Add BGP protocol to network instance
+        json_config_rule_set(
+            '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
+                'name': network_instance_name, 
+                'protocol_name': bgp_as, 
+                'identifier': 'BGP', 
+                'type': 'L3VRF',
+                'as': bgp_as,
+                'router_id': router_id, 
+                'neighbors': [{'ip_address': neighbor_bgp_if_address_ip, 'remote_as': bgp_as}]
+        }),
+
+        #Add DIRECTLY CONNECTED protocol to network instance
+        json_config_rule_set(
+            '/network_instance[{:s}]/protocols[DIRECTLY_CONNECTED]'.format(network_instance_name), {
+                'name': network_instance_name, 
+                'identifier': 'DIRECTLY_CONNECTED', 
+                'protocol_name': 'DIRECTLY_CONNECTED',
+        }),
+
+        #Add STATIC protocol to network instance
+        json_config_rule_set(
+            '/network_instance[{:s}]/protocols[STATIC]'.format(network_instance_name), {
+                'name': network_instance_name, 
+                'identifier': 'STATIC', 
+                'protocol_name': 'STATIC',
+        }),
+
+        #Create interface with subinterface (without IP address)
+        json_config_rule_set(
+            '/interface[{:s}]/subinterface[{:d}]'.format(if_subif_name, sub_interface_index), {
+                'name'       : if_subif_name,
+                'type'       :'ethernetCsmacd',
+                'mtu'        : mtu,
+                'index'      : sub_interface_index,
+                'description': network_subinterface_desc, 
+                'vlan_id'    : vlan_id,
+        }),
+
+        #Associate interface to network instance
+        json_config_rule_set(
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
+                'name'          : network_instance_name, 
+                'type'          : 'L3VRF',
+                'id'            : if_subif_name, 
+                'interface'     : if_subif_name,
+                'subinterface'  : sub_interface_index,
+                'address_ip'    : address_ip, 
+                'address_prefix': address_prefix,
+        }), 
+
+        #Create interface with subinterface (with IP address)
+        json_config_rule_set(
+            '/interface[{:s}]/subinterface[{:d}]'.format(if_subif_name, sub_interface_index), {
+                'name'          : if_subif_name,
+                'type'          :'ethernetCsmacd',
+                'mtu'           : mtu,
+                'index'         : sub_interface_index,
+                'description'   : network_subinterface_desc, 
+                'vlan_id'       : vlan_id,
+                'address_ip'    : address_ip, 
+                'address_prefix': address_prefix,
+        }),
+
+        json_config_rule_set(
+            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, self_bgp_if_name), {
+                'name'        : network_instance_name, 
+                'type'        : 'L3VRF',
+                'id'          : self_bgp_if_name, 
+                'interface'   : self_bgp_if_name,
+                'subinterface': bgp_sub_interface_index,
+                'address_ip'    : self_bgp_address_ip, 
+                'address_prefix': bgp_address_prefix,
+        }), 
+
+        #Create routing policy
+        json_config_rule_set(
+            '/routing_policy/bgp_defined_set[{:s}_rt_import][{:s}]'.format(policy_import,route_distinguisher), {
+                'ext_community_set_name': 'set_{:s}'.format(policy_import),
+                'ext_community_member'  : route_distinguisher,
+        }),
+        json_config_rule_set(
+            # pylint: disable=duplicate-string-formatting-argument
+            '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(policy_import, policy_import), {
+                'policy_name'           : policy_import,
+                'statement_name'        : 'stm_{:s}'.format(policy_import), # OCNOS: '10',
+                'ext_community_set_name': 'set_{:s}'.format(policy_import),
+                'policy_result'         : 'ACCEPT_ROUTE',
+        }),
+
+        #Associate routing policy to network instance
+        json_config_rule_set(
+            '/network_instance[{:s}]/inter_instance_policies[{:s}]'.format(network_instance_name, policy_import), {
+                'name'         : network_instance_name,
+                'import_policy': policy_import,
+        }),
+
+        #Create routing policy
+        json_config_rule_set(
+            '/routing_policy/bgp_defined_set[{:s}_rt_export][{:s}]'.format(policy_export, route_distinguisher), {
+                'ext_community_set_name': 'set_{:s}'.format(policy_export),
+                'ext_community_member'  : route_distinguisher,
+        }),
+        json_config_rule_set(
+            # pylint: disable=duplicate-string-formatting-argument
+            '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(policy_export, policy_export), {
+                'policy_name'           : policy_export,
+                'statement_name'        : 'stm_{:s}'.format(policy_export), # OCNOS: '10',
+                'ext_community_set_name': 'set_{:s}'.format(policy_export),
+                'policy_result'         : 'ACCEPT_ROUTE',
+        }),
+
+        #Associate routing policy to network instance
+        json_config_rule_set(
+            '/network_instance[{:s}]/inter_instance_policies[{:s}]'.format(network_instance_name, policy_export),{
+                'name'         : network_instance_name,
+                'export_policy': policy_export,
+        }),
+
+        #Create table connections
+        json_config_rule_set(
+            '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(network_instance_name), {
+                'name'                 : network_instance_name,
+                'src_protocol'         : 'DIRECTLY_CONNECTED',
+                'dst_protocol'         : 'BGP',
+                'address_family'       : 'IPV4',
+                'default_import_policy': 'ACCEPT_ROUTE',
+                'as'                   : bgp_as,
+        }),
+
+        json_config_rule_set(
+            '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
+                'name'                 : network_instance_name,
+                'src_protocol'         : 'STATIC',
+                'dst_protocol'         : 'BGP',
+                'address_family'       : 'IPV4',
+                'default_import_policy': 'ACCEPT_ROUTE',
+                'as'                   : bgp_as,
+        }),
+
+    ]
+
+    for res_key, res_value in endpoint_acls:
+        json_config_rules.append(
+            {'action': 1, 'acl': res_value}
+        )
+    return json_config_rules
+
+def teardown_config_rules(
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
+    service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode
+) -> List[Dict]:
+
+    if service_settings  is None: return []
+    if device_settings   is None: return []
+    if endpoint_settings is None: return []
+
+    json_settings          : Dict = service_settings.value
+    json_device_settings   : Dict = device_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
+
+    service_short_uuid        = service_uuid.split('-')[-1]
+    # network_instance_name     = '{:s}-NetInst'.format(service_short_uuid)
+    network_instance_name     = json_endpoint_settings.get('ni_name', service_short_uuid)  #ELAN-AC:1
+    #network_interface_desc    = '{:s}-NetIf'.format(service_uuid)
+    # network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)
+    network_subinterface_desc = ''
+
+    mtu                 = json_settings.get('mtu',                          1450     )  # 1512
+    #address_families  = json_settings.get('address_families',             []       )  # ['IPV4']
+    #bgp_as              = json_device_settings.get('bgp_as',                       65000    )  # 65000
+    route_distinguisher = json_device_settings.get('route_distinguisher',          '0:0'    )  # '60001:801'
+    sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
+    #router_id           = json_device_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
+    vlan_id             = json_endpoint_settings.get('vlan_id',             1        )  # 400
+    address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
+    address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30
+    policy_import       = json_device_settings.get('policy_AZ',            '2'      )  # 2
+    policy_export       = json_device_settings.get('policy_ZA',            '7'      )  # 30
+
+    self_bgp_if_name          = json_device_settings.get('self_bgp_interface_name', '')
+    self_bgp_address_ip       = json_device_settings.get('self_bgp_interface_address_ip', '')
+    bgp_address_prefix        = json_device_settings.get('bgp_interface_address_prefix', '')
+    bgp_sub_interface_index   = json_device_settings.get('self_bgp_sub_interface_index', 0)
+
+    # if_subif_name             = '{:s}.{:d}'.format(endpoint_name, vlan_id)
+    if_subif_name             = '{:s}'.format(endpoint_name[5:])
+
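+    # Teardown order: delete the export/import routing policies first, re-home the access
+    # and BGP interfaces to the 'default' network instance, then delete the L3VRF itself.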
+    json_config_rules = [
+        #Delete export routing policy 
+        json_config_rule_delete(
+            # pylint: disable=duplicate-string-formatting-argument
+            '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(policy_export, policy_export), {
+                'policy_name'           : policy_export,
+                'statement_name'        : 'stm_{:s}'.format(policy_export), # OCNOS: '10',
+                'ext_community_set_name': 'set_{:s}'.format(policy_export),
+                'policy_result'         : 'ACCEPT_ROUTE',
+        }),
+        json_config_rule_delete(
+            '/routing_policy/bgp_defined_set[{:s}_rt_export][{:s}]'.format(policy_export, route_distinguisher), {
+                'ext_community_set_name': 'set_{:s}'.format(policy_export),
+                'ext_community_member'  : route_distinguisher,
+        }),
+
+        #Delete import routing policy 
+        json_config_rule_delete(
+            # pylint: disable=duplicate-string-formatting-argument
+            '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(policy_import, policy_import), {
+                'policy_name'           : policy_import,
+                'statement_name'        : 'stm_{:s}'.format(policy_import), # OCNOS: '10',
+                'ext_community_set_name': 'set_{:s}'.format(policy_import),
+                'policy_result'         : 'ACCEPT_ROUTE',
+        }),
+        json_config_rule_delete(
+            '/routing_policy/bgp_defined_set[{:s}_rt_import][{:s}]'.format(policy_import, route_distinguisher), {
+                'ext_community_set_name': 'set_{:s}'.format(policy_import),
+                'ext_community_member'  : route_distinguisher,
+        }),
+
+        #Delete interface; automatically deletes:
+        # - /interface[]/subinterface[]
+        # json_config_rule_delete('/interface[{:s}]/subinterface[0]'.format(if_subif_name),
+        # {
+        #     'name': if_subif_name,
+        # }),
+
+        #Delete network instance; automatically deletes:
+        # - /network_instance[]/interface[]
+        # - /network_instance[]/protocols[]
+        # - /network_instance[]/inter_instance_policies[]
+
+        #Re-associate the interfaces to the 'default' network instance
+        json_config_rule_set(
+            '/network_instance[{:s}]/interface[{:s}]'.format('default', if_subif_name), {
+                'name'          : 'default',
+                'id'            : if_subif_name,
+                'interface'     : if_subif_name,
+                'subinterface'  : sub_interface_index,
+                'address_ip'    : address_ip,
+                'address_prefix': address_prefix,
+        }),
+        json_config_rule_set(
+            '/network_instance[{:s}]/interface[{:s}]'.format('default', self_bgp_if_name), {
+                'name'          : 'default',
+                'id'            : self_bgp_if_name,
+                'interface'     : self_bgp_if_name,
+                'subinterface'  : bgp_sub_interface_index,
+                'address_ip'    : self_bgp_address_ip,
+                'address_prefix': bgp_address_prefix,
+        }),
+        json_config_rule_delete('/network_instance[{:s}]'.format(network_instance_name),
+        {
+            'name': network_instance_name
+        }),
+    ]
+    return json_config_rules
diff --git a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
index d714946d128284d60422779bc6286b64ee0244a7..2c944bfe4772b2f55ff16d1a3c726af3ee4c6e8f 100644
--- a/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
+++ b/src/service/service/service_handlers/l3nm_openconfig/L3NMOpenConfigServiceHandler.py
@@ -52,6 +52,7 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler):
                 device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
                 device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+                device_settings = self.__settings_handler.get_device_settings(device_obj)
                 endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
                 endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
                 endpoint_acls = self.__settings_handler.get_endpoint_acls(device_obj, endpoint_obj)
@@ -59,7 +60,7 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler):
 
                 json_config_rules = setup_config_rules(
                     service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
-                    settings, endpoint_settings, endpoint_acls)
+                    settings, device_settings, endpoint_settings, endpoint_acls)
 
                 if len(json_config_rules) > 0:
                     del device_obj.device_config.config_rules[:]
@@ -90,13 +91,14 @@ class L3NMOpenConfigServiceHandler(_ServiceHandler):
                 device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
                 device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+                device_settings = self.__settings_handler.get_device_settings(device_obj)
                 endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
                 endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
                 endpoint_name = endpoint_obj.name
 
                 json_config_rules = teardown_config_rules(
                     service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
-                    settings, endpoint_settings)
+                    settings, device_settings, endpoint_settings)
 
                 if len(json_config_rules) > 0:
                     del device_obj.device_config.config_rules[:]
diff --git a/src/slice/requirements.in b/src/slice/requirements.in
index f2e7219e38a9b76bee5c1ae9e95544d1bc38065a..158355b697b14265c7ce965953c9d75b9bfdea65 100644
--- a/src/slice/requirements.in
+++ b/src/slice/requirements.in
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 #deepdiff==5.8.*
-numpy==1.23.*
+numpy<2.0.0
 pandas==1.5.*
 questdb==1.0.1
 requests==2.27.*
diff --git a/src/telemetry/README.md b/src/telemetry/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..da43bd471c384ae9133871a097e94043f70ed7de
--- /dev/null
+++ b/src/telemetry/README.md
@@ -0,0 +1,10 @@
+# How to locally run and test the Telemetry service
+
+### Pre-requisites
+The following requirements should be fulfilled before the execution of the Telemetry service.
+
+1. Verify that the [telemetry_frontend.proto](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/proto/telemetry_frontend.proto) file exists and that the gRPC stub files are generated successfully.
+2. A virtual environment exists with all the required packages listed in ["requirements.in"](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/telemetry_virenv.txt) installed successfully.
+3. Verify the creation of the required database and tables. The
+[DB test](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/telemetry/database/tests/managementDBtests.py) python file lists the functions to create the database and tables, and the
+[KPI Engine](https://labs.etsi.org/rep/tfs/controller/-/blob/feat/71-cttc-separation-of-monitoring/src/kpi_manager/service/database/KpiEngine.py) file contains the DB connection string; update it as per your deployment.
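+
+As a quick sanity check for step 3, the snippet below (a minimal sketch, assuming the
+`telemetry.database` package is importable and CockroachDB is reachable with the credentials
+hard-coded in `TelemetryEngine`) creates the database and tables and lists them:
+
+```python
+from telemetry.database.TelemetryDBmanager import TelemetryDBmanager
+
+db = TelemetryDBmanager()   # builds the SQLAlchemy engine via TelemetryEngine.get_engine()
+db.create_database()        # creates the 'telemetryfrontend' database if missing
+db.create_tables()          # creates the tables declared in TelemetryModel
+db.verify_tables()          # logs the tables found in the database
+```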
diff --git a/src/telemetry/__init__.py b/src/telemetry/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..234a1af6588c91f6a17f3963f69120cd6e2248d9
--- /dev/null
+++ b/src/telemetry/__init__.py
@@ -0,0 +1,15 @@
+
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/backend/__init__.py b/src/telemetry/backend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/telemetry/backend/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py
new file mode 100755
index 0000000000000000000000000000000000000000..d81be79dbe410ccbf2781816f34735f6bfe5639d
--- /dev/null
+++ b/src/telemetry/backend/service/TelemetryBackendService.py
@@ -0,0 +1,253 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ast
+import time
+import random
+import logging
+import requests
+import threading
+from typing import Any, Tuple
+from common.proto.context_pb2 import Empty
+from confluent_kafka import Producer as KafkaProducer
+from confluent_kafka import Consumer as KafkaConsumer
+from confluent_kafka import KafkaException
+from confluent_kafka import KafkaError
+from confluent_kafka.admin import AdminClient, NewTopic
+from common.proto.telemetry_frontend_pb2 import Collector, CollectorId
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+
+LOGGER             = logging.getLogger(__name__)
+METRICS_POOL       = MetricsPool('Telemetry', 'TelemetryBackend')
+KAFKA_SERVER_IP    = '127.0.0.1:9092'
+# KAFKA_SERVER_IP    = '10.152.183.175:30092'
+ADMIN_KAFKA_CLIENT = AdminClient({'bootstrap.servers': KAFKA_SERVER_IP})
+KAFKA_TOPICS       = {'request' : 'topic_request', 'response': 'topic_response',
+                      'raw'     : 'topic_raw'    , 'labeled' : 'topic_labeled'}
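+# Topic roles: 'request' carries collector start/stop commands, 'response' the measured
+# KPI values, 'raw' full node-exporter scrapes; 'labeled' is declared but unused in this file.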
+EXPORTER_ENDPOINT  = "http://10.152.183.2:9100/metrics"
+PRODUCER_CONFIG    = {'bootstrap.servers': KAFKA_SERVER_IP,}
+
+
+class TelemetryBackendService:
+    """
+    Listens for requests on a Kafka topic, fetches the corresponding metrics, and produces the measured values to another Kafka topic.
+    """
+
+    def __init__(self):
+        LOGGER.info('Init TelemetryBackendService')
+        self.running_threads = {}
+    
+    def run_kafka_listener(self) -> bool:
+        threading.Thread(target=self.kafka_listener).start()
+        return True
+    
+    def kafka_listener(self):
+        """
+        listener for requests on Kafka topic.
+        """
+        conusmer_configs = {
+            'bootstrap.servers' : KAFKA_SERVER_IP,
+            'group.id'          : 'backend',
+            'auto.offset.reset' : 'latest'
+        }
+        # topic_request = "topic_request"
+        consumerObj = KafkaConsumer(conusmer_configs)
+        # consumerObj.subscribe([topic_request])
+        consumerObj.subscribe([KAFKA_TOPICS['request']])
+
+        while True:
+            receive_msg = consumerObj.poll(2.0)
+            if receive_msg is None:
+                # print (time.time(), " - Telemetry backend is listening on Kafka Topic: ", KAFKA_TOPICS['request'])     # added for debugging purposes
+                continue
+            elif receive_msg.error():
+                if receive_msg.error().code() == KafkaError._PARTITION_EOF:
+                    continue
+                else:
+                    print("Consumer error: {}".format(receive_msg.error()))
+                    break
+            (kpi_id, duration, interval) = ast.literal_eval(receive_msg.value().decode('utf-8'))
+            collector_id = receive_msg.key().decode('utf-8')
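+            # By convention in this service, duration == -1 and interval == -1 signals collector termination.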
+            if duration == -1 and interval == -1:
+                self.terminate_collector_backend(collector_id)
+                # threading.Thread(target=self.terminate_collector_backend, args=(collector_id))
+            else:
+                self.run_initiate_collector_backend(collector_id, kpi_id, duration, interval)
+
+
+    def run_initiate_collector_backend(self, collector_id: str, kpi_id: str, duration: int, interval: int):
+        stop_event = threading.Event()
+        thread = threading.Thread(target=self.initiate_collector_backend, 
+                                  args=(collector_id, kpi_id, duration, interval, stop_event))
+        self.running_threads[collector_id] = (thread, stop_event)
+        thread.start()
+
+    def initiate_collector_backend(self, collector_id, kpi_id, duration, interval, stop_event): # type: ignore
+        """
+        Receives the collector request attributes and initiates the collector backend.
+        """
+        print("Initiating backend for collector: ", collector_id)
+        start_time = time.time()
+        while not stop_event.is_set():
+            if time.time() - start_time >= duration:            # condition to terminate backend
+                print("Execuation duration completed: Terminating backend: Collector Id: ", collector_id, " - ", time.time() - start_time)
+                self.generate_kafka_response(collector_id, "-1", -1)
+                # write to Kafka to send the termination confirmation.
+                break
+            # print ("Received KPI: ", kpi_id, ", Duration: ", duration, ", Fetch Interval: ", interval)
+            self.extract_kpi_value(collector_id, kpi_id)
+            # print ("Telemetry Backend running for KPI: ", kpi_id, "after FETCH INTERVAL: ", interval)
+            time.sleep(interval)
+
+    def extract_kpi_value(self, collector_id: str, kpi_id: str):
+        """
+        Method to extract kpi value.
+        """
+        measured_kpi_value = random.randint(1,100)                  # Should be extracted from exporter/stream
+        # measured_kpi_value = self.fetch_node_exporter_metrics()     # exporter extracted metric value against default KPI
+        self.generate_kafka_response(collector_id, kpi_id , measured_kpi_value)
+
+    def generate_kafka_response(self, collector_id: str, kpi_id: str, kpi_value: Any):
+        """
+        Method to write the response on the Kafka response topic.
+        """
+        msg_value : Tuple [str, Any] = (kpi_id, kpi_value)
+        msg_key    = collector_id
+        producerObj = KafkaProducer(PRODUCER_CONFIG)
+        producerObj.produce(KAFKA_TOPICS['response'], key=msg_key, value=str(msg_value), callback=TelemetryBackendService.delivery_callback)
+        producerObj.flush()
+
+    def terminate_collector_backend(self, collector_id):
+        if collector_id in self.running_threads:
+            thread, stop_event = self.running_threads[collector_id]
+            stop_event.set()
+            thread.join()
+            print ("Terminating backend (by StopCollector): Collector Id: ", collector_id)
+            del self.running_threads[collector_id]
+            self.generate_kafka_response(collector_id, "-1", -1)
+
+    def create_topic_if_not_exists(self, new_topics: list) -> bool:
+        """
+        Method to create Kafka topic if it does not exist.
+        Args:
+            admin_client (AdminClient): Kafka admin client.
+        """
+        for topic in new_topics:
+            try:
+                topic_metadata = ADMIN_KAFKA_CLIENT.list_topics(timeout=5)
+                if topic not in topic_metadata.topics:
+                    # If the topic does not exist, create a new topic
+                    print(f"Topic '{topic}' does not exist. Creating...")
+                    LOGGER.warning("Topic {:} does not exist. Creating...".format(topic))
+                    new_topic = NewTopic(topic, num_partitions=1, replication_factor=1)
+                    ADMIN_KAFKA_CLIENT.create_topics([new_topic])
+            except KafkaException as e:
+                print(f"Failed to create topic: {e}")
+                return False
+        return True
+
+    @staticmethod
+    def delivery_callback( err, msg):
+        """
+        Callback function to handle message delivery status.
+        Args:
+            err (KafkaError): Kafka error object.
+            msg (Message): Kafka message object.
+        """
+        if err:
+            print(f'Message delivery failed: {err}')
+        else:
+            print(f'Message delivered to topic {msg.topic()}')
+
+# ----------- BELOW: Actual Implementation of Kafka Producer with Node Exporter -----------
+    @staticmethod
+    def fetch_single_node_exporter_metric():
+        """
+        Method to fetch metrics from Node Exporter.
+        Returns:
+            str: Metrics fetched from Node Exporter.
+        """
+        KPI = "node_network_receive_packets_total"
+        try:
+            response = requests.get(EXPORTER_ENDPOINT) # type: ignore
+            LOGGER.info("Request status {:}".format(response))
+            if response.status_code == 200:
+                # print(f"Metrics fetched sucessfully...")
+                metrics = response.text
+                # Check if the desired metric is available in the response
+                if KPI in metrics:
+                    KPI_VALUE = TelemetryBackendService.extract_metric_value(metrics, KPI)
+                    # Extract the metric value
+                    if KPI_VALUE is not None:
+                        LOGGER.info("Extracted value of {:} is {:}".format(KPI, KPI_VALUE))
+                        print(f"Extracted value of {KPI} is: {KPI_VALUE}")
+                        return KPI_VALUE
+            else:
+                LOGGER.info("Failed to fetch metrics. Status code: {:}".format(response.status_code))
+                # print(f"Failed to fetch metrics. Status code: {response.status_code}")
+                return None
+        except Exception as e:
+            LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e))
+            # print(f"Failed to fetch metrics: {str(e)}")
+            return None
+
+    @staticmethod
+    def extract_metric_value(metrics, metric_name):
+        """
+        Method to extract the value of a metric from the metrics string.
+        Args:
+            metrics (str): Metrics string fetched from Exporter.
+            metric_name (str): Name of the metric to extract.
+        Returns:
+            float: Value of the extracted metric, or None if not found.
+        """
+        try:
+            # Find the metric line containing the desired metric name
+            metric_line = next(line for line in metrics.split('\n') if line.startswith(metric_name))
+            # Split the line to extract the metric value
+            metric_value = float(metric_line.split()[1])
+            return metric_value
+        except StopIteration:
+            print(f"Metric '{metric_name}' not found in the metrics.")
+            return None
+
+    @staticmethod
+    def stream_node_export_metrics_to_raw_topic():
+        try:
+            while True:
+                response = requests.get(EXPORTER_ENDPOINT)
+                # print("Response Status {:} ".format(response))
+                # LOGGER.info("Response Status {:} ".format(response))
+                try: 
+                    if response.status_code == 200:
+                        producerObj = KafkaProducer(PRODUCER_CONFIG)
+                        producerObj.produce(KAFKA_TOPICS['raw'], key="raw", value= str(response.text), callback=TelemetryBackendService.delivery_callback)
+                        producerObj.flush()
+                        LOGGER.info("Produce to topic")
+                    else:
+                        LOGGER.info("Didn't received expected response. Status code: {:}".format(response.status_code))
+                        print(f"Didn't received expected response. Status code: {response.status_code}")
+                        return None
+                    time.sleep(15)
+                except Exception as e:
+                    LOGGER.info("Failed to process response. Status code: {:}".format(e))
+                    return None
+        except Exception as e:
+            LOGGER.info("Failed to fetch metrics. Status code: {:}".format(e))
+            print(f"Failed to fetch metrics: {str(e)}")
+            return None
+# ----------- ABOVE: Actual Implementation of Kafka Producer with Node Exporter -----------
\ No newline at end of file
diff --git a/src/telemetry/backend/service/__init__.py b/src/telemetry/backend/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/telemetry/backend/service/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/telemetry/backend/tests/__init__.py b/src/telemetry/backend/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc943b68af13a11e562abbc8680ade71db8f02
--- /dev/null
+++ b/src/telemetry/backend/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/telemetry/backend/tests/messagesBackend.py b/src/telemetry/backend/tests/messagesBackend.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cf553eaaec41de7599b6723e31e4ca3f82cbcae
--- /dev/null
+++ b/src/telemetry/backend/tests/messagesBackend.py
@@ -0,0 +1,15 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
diff --git a/src/telemetry/backend/tests/testTelemetryBackend.py b/src/telemetry/backend/tests/testTelemetryBackend.py
new file mode 100644
index 0000000000000000000000000000000000000000..d832e54e77589ca677682760d19e68b1bd09b1f7
--- /dev/null
+++ b/src/telemetry/backend/tests/testTelemetryBackend.py
@@ -0,0 +1,53 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+sys.path.append('/home/tfs/tfs-ctrl')
+import threading
+import logging
+from typing import Tuple
+# from common.proto.context_pb2 import Empty
+from src.telemetry.backend.service.TelemetryBackendService import TelemetryBackendService
+
+LOGGER = logging.getLogger(__name__)
+
+
+###########################
+# Tests Implementation of Telemetry Backend
+###########################
+
+def test_verify_kafka_topics():
+    LOGGER.info('test_verify_kafka_topics requesting')
+    TelemetryBackendServiceObj = TelemetryBackendService()
+    KafkaTopics = ['topic_request', 'topic_response', 'topic_raw', 'topic_labeled']
+    response = TelemetryBackendServiceObj.create_topic_if_not_exists(KafkaTopics)
+    LOGGER.debug(str(response))
+    assert isinstance(response, bool)
+
+# def test_run_kafka_listener():
+#     LOGGER.info('test_receive_kafka_request requesting')
+#     TelemetryBackendServiceObj = TelemetryBackendService()
+#     response = TelemetryBackendServiceObj.run_kafka_listener()
+#     LOGGER.debug(str(response))
+#     assert isinstance(response, bool)
+
+# def test_fetch_node_exporter_metrics():
+#     LOGGER.info(' >>> test_fetch_node_exporter_metrics START <<< ')
+#     TelemetryBackendService.fetch_single_node_exporter_metric()
+
+def test_stream_node_export_metrics_to_raw_topic():
+    LOGGER.info(' >>> test_stream_node_export_metrics_to_raw_topic START <<< ')
+    threading.Thread(target=TelemetryBackendService.stream_node_export_metrics_to_raw_topic, args=()).start()
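+
+# Note: these tests assume a Kafka broker reachable at the KAFKA_SERVER_IP configured in
+# TelemetryBackendService; run them, e.g., with: pytest -s -v testTelemetryBackend.py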
+
diff --git a/src/telemetry/database/TelemetryDBmanager.py b/src/telemetry/database/TelemetryDBmanager.py
new file mode 100644
index 0000000000000000000000000000000000000000..b558180a9e1fbf85bf523c7faededf58f57e2264
--- /dev/null
+++ b/src/telemetry/database/TelemetryDBmanager.py
@@ -0,0 +1,248 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, time
+import sqlalchemy
+from sqlalchemy import inspect, MetaData, Table
+from sqlalchemy.orm import sessionmaker
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+from telemetry.database.TelemetryModel import Kpi as KpiModel
+from sqlalchemy.ext.declarative import declarative_base
+from telemetry.database.TelemetryEngine import TelemetryEngine
+from common.proto.kpi_manager_pb2 import KpiDescriptor, KpiId
+from common.proto.telemetry_frontend_pb2 import Collector, CollectorId
+from sqlalchemy.exc import SQLAlchemyError
+from telemetry.database.TelemetryModel import Base
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "telemetryfrontend"
+
+class TelemetryDBmanager:
+    def __init__(self):
+        self.db_engine = TelemetryEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            raise Exception('Unable to get SQLAlchemy DB Engine')
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
+    def create_database(self):
+        try:
+            # with self.db_engine.connect() as connection:
+            #     connection.execute(f"CREATE DATABASE {self.db_name};")
+            TelemetryEngine.create_database(self.db_engine)
+            LOGGER.info('TelemetryDBmanager initialized the DB. Name: {:}'.format(self.db_name))
+            return True
+        except Exception as e: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to check/create the database: {:s}'.format(str(e)))
+            return False
+
+    def create_tables(self):
+        try:
+            Base.metadata.create_all(self.db_engine)     # type: ignore
+            LOGGER.info("Tables created in database ({:}) the as per Models".format(self.db_name))
+        except Exception as e:
+            LOGGER.info("Tables cannot be created in the TelemetryFrontend database. {:s}".format(str(e)))
+
+    def verify_tables(self):
+        try:
+            with self.db_engine.connect() as connection:
+                result = connection.execute("SHOW TABLES;")
+                tables = result.fetchall()
+                LOGGER.info("Tables in DB: {:}".format(tables))
+        except Exception as e:
+            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
+
+    def drop_table(self, table_to_drop: str):
+        try:
+            inspector = inspect(self.db_engine)
+            existing_tables = inspector.get_table_names()
+            if table_to_drop in existing_tables:
+                table = Table(table_to_drop, MetaData(), autoload_with=self.db_engine)
+                table.drop(self.db_engine)
+                LOGGER.info("Tables delete in the DB Name: {:}".format(self.db_name))
+            else:
+                LOGGER.warning("No table {:} in database {:} ".format(table_to_drop, DB_NAME))
+        except Exception as e:
+            LOGGER.info("Tables cannot be deleted in the {:} database. {:s}".format(DB_NAME, str(e)))
+
+    def list_databases(self):
+        query = "SHOW DATABASES"
+        with self.db_engine.connect() as connection:
+            result = connection.execute(query)
+            databases = [row[0] for row in result]
+        LOGGER.info("List of available DBs: {:}".format(databases))
+        
+# ------------------ INSERT METHODs --------------------------------------
+
+    def insert_kpi(self, request: KpiDescriptor):
+        session = self.Session()
+        try:
+            # Create a new Kpi instance
+            kpi_to_insert                 = KpiModel()
+            kpi_to_insert.kpi_id          = request.kpi_id.kpi_id.uuid
+            kpi_to_insert.kpi_description = request.kpi_description
+            kpi_to_insert.kpi_sample_type = request.kpi_sample_type
+            kpi_to_insert.device_id       = request.device_id.device_uuid.uuid
+            kpi_to_insert.endpoint_id     = request.endpoint_id.endpoint_uuid.uuid
+            kpi_to_insert.service_id      = request.service_id.service_uuid.uuid
+            kpi_to_insert.slice_id        = request.slice_id.slice_uuid.uuid
+            kpi_to_insert.connection_id   = request.connection_id.connection_uuid.uuid
+            # kpi_to_insert.link_id         = request.link_id.link_id.uuid
+            # Add the instance to the session
+            session.add(kpi_to_insert)
+            session.commit()
+            LOGGER.info("Row inserted into kpi table: {:}".format(kpi_to_insert.kpi_id))
+        except Exception as e:
+            session.rollback()
+            LOGGER.info("Failed to insert new kpi. {:s}".format(str(e)))
+        finally:
+            # Close the session
+            session.close()
+
+    # Function to insert a row into the Collector model
+    def insert_collector(self, request: Collector):
+        session = self.Session()
+        try:
+            # Create a new Collector instance
+            collector_to_insert                     = CollectorModel()
+            collector_to_insert.collector_id        = request.collector_id.collector_id.uuid
+            collector_to_insert.kpi_id              = request.kpi_id.kpi_id.uuid  
+            collector_to_insert.collector_description = "Test collector description"
+            collector_to_insert.sampling_duration_s = request.duration_s
+            collector_to_insert.sampling_interval_s = request.interval_s
+            collector_to_insert.start_timestamp     = time.time()
+            collector_to_insert.end_timestamp       = time.time()
+            
+            session.add(collector_to_insert)
+            session.commit()
+            LOGGER.info("Row inserted into collector table: {:}".format(collector_to_insert.collector_id))
+        except Exception as e:
+            session.rollback()
+            LOGGER.info("Failed to insert new collector. {:s}".format(str(e)))
+        finally:
+            # Close the session
+            session.close()
+
+# ------------------ GET METHODs --------------------------------------
+
+    def get_kpi_descriptor(self, request: KpiId):
+        session = self.Session()
+        try:
+            kpi_id_to_search = request.kpi_id.uuid
+            kpi = session.query(KpiModel).filter_by(kpi_id=kpi_id_to_search).first()
+            if kpi:
+                LOGGER.info("kpi ID found: {:s}".format(str(kpi)))
+                return kpi
+            else:
+                LOGGER.warning("Kpi ID not found {:s}".format(str(kpi_id_to_search)))
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.info("Failed to retrieve KPI ID. {:s}".format(str(e)))
+            raise
+        finally:
+            session.close()
+
+    def get_collector(self, request: CollectorId):
+        session = self.Session()
+        try:
+            collector_id_to_search = request.collector_id.uuid
+            collector = session.query(CollectorModel).filter_by(collector_id=collector_id_to_search).first()
+            if collector:
+                LOGGER.info("collector ID found: {:s}".format(str(collector)))
+                return collector
+            else:
+                LOGGER.warning("collector ID not found{:s}".format(str(collector_id_to_search)))
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.info("Failed to retrieve collector ID. {:s}".format(str(e)))
+            raise
+        finally:
+            session.close()
+    
+    # ------------------ SELECT METHODs --------------------------------------
+
+    def select_kpi_descriptor(self, **filters):
+        session = self.Session()
+        try:
+            query = session.query(KpiModel)
+            for column, value in filters.items():
+                query = query.filter(getattr(KpiModel, column) == value)
+            result = query.all()
+            if len(result) != 0:
+                LOGGER.info("Fetched filtered rows from KPI table with filters : {:s}".format(str(result)))
+            else:
+                LOGGER.warning("No matching row found : {:s}".format(str(result)))
+            return result
+        except SQLAlchemyError as e:
+            LOGGER.error("Error fetching filtered rows from KPI table with filters {:}: {:}".format(filters, e))
+            return []
+        finally:
+            session.close()
+    
+    def select_collector(self, **filters):
+        session = self.Session()
+        try:
+            query = session.query(CollectorModel)
+            for column, value in filters.items():
+                query = query.filter(getattr(CollectorModel, column) == value)
+            result = query.all()
+            if len(result) != 0:
+                LOGGER.info("Fetched filtered rows from KPI table with filters : {:s}".format(str(result)))
+            else:
+                LOGGER.warning("No matching row found : {:s}".format(str(result)))            
+            return result
+        except SQLAlchemyError as e:
+            LOGGER.error("Error fetching filtered rows from KPI table with filters {:}: {:}".format(filters, e))
+            return []
+        finally:
+            session.close()
+
+# ------------------ DELETE METHODs --------------------------------------
+
+    def delete_kpi_descriptor(self, request: KpiId):
+        session = self.Session()
+        try:
+            kpi_id_to_delete = request.kpi_id.uuid
+            kpi = session.query(KpiModel).filter_by(kpi_id=kpi_id_to_delete).first()
+            if kpi:
+                session.delete(kpi)
+                session.commit()
+                LOGGER.info("Deleted KPI with kpi_id: %s", kpi_id_to_delete)
+            else:
+                LOGGER.warning("KPI with kpi_id %s not found", kpi_id_to_delete)
+        except SQLAlchemyError as e:
+            session.rollback()
+            LOGGER.error("Error deleting KPI with kpi_id %s: %s", kpi_id_to_delete, e)
+        finally:
+            session.close()
+
+    def delete_collector(self, request: CollectorId):
+        session = self.Session()
+        try:
+            collector_id_to_delete = request.collector_id.uuid
+            collector = session.query(CollectorModel).filter_by(collector_id=collector_id_to_delete).first()
+            if collector:
+                session.delete(collector)
+                session.commit()
+                LOGGER.info("Deleted collector with collector_id: %s", collector_id_to_delete)
+            else:
+                LOGGER.warning("collector with collector_id %s not found", collector_id_to_delete)
+        except SQLAlchemyError as e:
+            session.rollback()
+            LOGGER.error("Error deleting collector with collector_id %s: %s", collector_id_to_delete, e)
+        finally:
+            session.close()
\ No newline at end of file
diff --git a/src/telemetry/database/TelemetryEngine.py b/src/telemetry/database/TelemetryEngine.py
new file mode 100644
index 0000000000000000000000000000000000000000..a563fa09f94c812aed07d0aa3cbd5bc988737fc4
--- /dev/null
+++ b/src/telemetry/database/TelemetryEngine.py
@@ -0,0 +1,59 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, sqlalchemy, sqlalchemy_utils
+# from common.Settings import get_setting
+
+LOGGER = logging.getLogger(__name__)
+
+APP_NAME = 'tfs'
+ECHO = False                # False: No dump SQL commands and transactions executed
+CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}'
+# CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}'
+
+class TelemetryEngine:
+    # def __init__(self):
+    #     self.engine = self.get_engine()
+    @staticmethod
+    def get_engine() -> sqlalchemy.engine.Engine:
+        CRDB_NAMESPACE = "crdb"
+        CRDB_SQL_PORT  = "26257"
+        CRDB_DATABASE  = "telemetryfrontend"
+        CRDB_USERNAME  = "tfs"
+        CRDB_PASSWORD  = "tfs123"
+        CRDB_SSLMODE   = "require"
+        crdb_uri = CRDB_URI_TEMPLATE.format(
+                CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
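+        # With the defaults above this composes to:
+        #   cockroachdb://tfs:tfs123@127.0.0.1:26257/telemetryfrontend?sslmode=require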
+        # crdb_uri = CRDB_URI_TEMPLATE.format(
+        #         CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+        try:
+            # engine = sqlalchemy.create_engine(
+            #     crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True)
+            engine = sqlalchemy.create_engine(crdb_uri, echo=False)
+            LOGGER.info('TelemetryEngine initialized with DB URL: {:}'.format(crdb_uri))
+        except: # pylint: disable=bare-except # pragma: no cover
+            LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri)))
+            return None # type: ignore
+        return engine # type: ignore
+
+    @staticmethod
+    def create_database(engine : sqlalchemy.engine.Engine) -> None:
+        if not sqlalchemy_utils.database_exists(engine.url):
+            LOGGER.info("Database created. {:}".format(engine.url))
+            sqlalchemy_utils.create_database(engine.url)
+
+    @staticmethod
+    def drop_database(engine : sqlalchemy.engine.Engine) -> None:
+        if sqlalchemy_utils.database_exists(engine.url):
+            sqlalchemy_utils.drop_database(engine.url)
diff --git a/src/telemetry/database/TelemetryModel.py b/src/telemetry/database/TelemetryModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..be4f0969c86638520cf226b8e42db90426165804
--- /dev/null
+++ b/src/telemetry/database/TelemetryModel.py
@@ -0,0 +1,45 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy import Column, Integer, String, Float, Text, ForeignKey
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker, relationship
+from sqlalchemy.orm import registry
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+# Create a base class for declarative models
+Base = registry().generate_base()
+# Base = declarative_base()
+    
+class Collector(Base):
+    __tablename__ = 'collector'
+
+    collector_id          = Column(UUID(as_uuid=False), primary_key=True)
+    kpi_id                = Column(UUID(as_uuid=False))
+    collector_description = Column(String)
+    sampling_duration_s   = Column(Float)
+    sampling_interval_s   = Column(Float)
+    start_timestamp       = Column(Float)
+    end_timestamp         = Column(Float)
+
+
+    def __repr__(self):
+        return (f"<Collector(collector_id='{self.collector_id}', kpi_id='{self.kpi_id}', "
+                f"collector='{self.collector_decription}', sampling_duration_s='{self.sampling_duration_s}', "
+                f"sampling_interval_s='{self.sampling_interval_s}', start_timestamp='{self.start_timestamp}', "
+                f"end_timestamp='{self.end_timestamp}')>")
\ No newline at end of file
diff --git a/src/telemetry/database/__init__.py b/src/telemetry/database/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/telemetry/database/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/database/__main__.py b/src/telemetry/database/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cf553eaaec41de7599b6723e31e4ca3f82cbcae
--- /dev/null
+++ b/src/telemetry/database/__main__.py
@@ -0,0 +1,15 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
diff --git a/src/telemetry/database/managementDB.py b/src/telemetry/database/managementDB.py
new file mode 100644
index 0000000000000000000000000000000000000000..f79126f279d7bbece6c08ae5eb1cd74e340d1c7d
--- /dev/null
+++ b/src/telemetry/database/managementDB.py
@@ -0,0 +1,138 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, time
+import sqlalchemy
+import sqlalchemy_utils
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+from telemetry.database.TelemetryEngine import TelemetryEngine
+from telemetry.database.TelemetryModel import Base
+
+LOGGER = logging.getLogger(__name__)
+DB_NAME = "telemetryfrontend"
+
+# # Create a base class for declarative models
+# Base = declarative_base()
+
+class managementDB:
+    def __init__(self):
+        self.db_engine = TelemetryEngine.get_engine()
+        if self.db_engine is None:
+            LOGGER.error('Unable to get SQLAlchemy DB Engine...')
+            raise Exception('Unable to get SQLAlchemy DB Engine')
+        self.db_name = DB_NAME
+        self.Session = sessionmaker(bind=self.db_engine)
+
+    @staticmethod
+    def create_database(engine : sqlalchemy.engine.Engine) -> None:
+        if not sqlalchemy_utils.database_exists(engine.url):
+            LOGGER.info("Database created. {:}".format(engine.url))
+            sqlalchemy_utils.create_database(engine.url)
+
+    @staticmethod
+    def drop_database(engine : sqlalchemy.engine.Engine) -> None:
+        if sqlalchemy_utils.database_exists(engine.url):
+            sqlalchemy_utils.drop_database(engine.url)
+
+    # def create_database(self):
+    #     try:
+    #         with self.db_engine.connect() as connection:
+    #             connection.execute(f"CREATE DATABASE {self.db_name};")
+    #         LOGGER.info('managementDB initalizes database. Name: {self.db_name}')
+    #         return True
+    #     except: 
+    #         LOGGER.exception('Failed to check/create the database: {:s}'.format(str(self.db_engine.url)))
+    #         return False
+    
+    @staticmethod
+    def create_tables(engine : sqlalchemy.engine.Engine):
+        try:
+            Base.metadata.create_all(engine)     # type: ignore
+            LOGGER.info("Tables created in the DB Name: {:}".format(DB_NAME))
+        except Exception as e:
+            LOGGER.info("Tables cannot be created in the TelemetryFrontend database. {:s}".format(str(e)))
+
+    def verify_tables(self):
+        try:
+            with self.db_engine.connect() as connection:
+                result = connection.execute("SHOW TABLES;")
+                tables = result.fetchall()      # type: ignore
+                LOGGER.info("Tables verified: {:}".format(tables))
+        except Exception as e:
+            LOGGER.info("Unable to fetch Table names. {:s}".format(str(e)))
+
+    def add_row_to_db(self, row):
+        session = self.Session()
+        try:
+            session.add(row)
+            session.commit()
+            LOGGER.info(f"Row inserted into {row.__class__.__name__} table.")
+        except Exception as e:
+            session.rollback()
+            LOGGER.error(f"Failed to insert new row into {row.__class__.__name__} table. {str(e)}")
+        finally:
+            session.close()
+    
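+    # The generic helpers below take the model class and a column name, e.g. (hypothetical call):
+    #   managementDB().search_db_row_by_id(CollectorModel, 'collector_id', collector_uuid)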
+    def search_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            entity = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if entity:
+                LOGGER.info(f"{model.__name__} ID found: {str(entity)}")
+                return entity
+            else:
+                LOGGER.warning(f"{model.__name__} ID not found: {str(id_to_search)}")
+                return None
+        except Exception as e:
+            session.rollback()
+            LOGGER.info(f"Failed to retrieve {model.__name__} ID. {str(e)}")
+            raise
+        finally:
+            session.close()
+    
+    def delete_db_row_by_id(self, model, col_name, id_to_search):
+        session = self.Session()
+        try:
+            record = session.query(model).filter_by(**{col_name: id_to_search}).first()
+            if record:
+                session.delete(record)
+                session.commit()
+                LOGGER.info("Deleted %s with %s: %s", model.__name__, col_name, id_to_search)
+            else:
+                LOGGER.warning("%s with %s %s not found", model.__name__, col_name, id_to_search)
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error deleting %s with %s %s: %s", model.__name__, col_name, id_to_search, e)
+        finally:
+            session.close()
+    
+    def select_with_filter(self, model, **filters):
+        session = self.Session()
+        try:
+            query = session.query(model)
+            for column, value in filters.items():
+                query = query.filter(getattr(model, column) == value) # type: ignore   
+            result = query.all()
+            if result:
+                LOGGER.info(f"Fetched filtered rows from {model.__name__} table with filters: {filters}") #  - Results: {result}
+            else:
+                LOGGER.warning(f"No matching row found in {model.__name__} table with filters: {filters}")
+            return result
+        except Exception as e:
+            LOGGER.error(f"Error fetching filtered rows from {model.__name__} table with filters {filters} ::: {e}")
+            return []
+        finally:
+            session.close()
\ No newline at end of file
diff --git a/src/telemetry/database/tests/__init__.py b/src/telemetry/database/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..839e45e3b646bc60de7edd81fcfb91b7b38feadf
--- /dev/null
+++ b/src/telemetry/database/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
\ No newline at end of file
diff --git a/src/telemetry/database/tests/managementDBtests.py b/src/telemetry/database/tests/managementDBtests.py
new file mode 100644
index 0000000000000000000000000000000000000000..24138abe42be742bd9b16d7840343f9d7c7fe133
--- /dev/null
+++ b/src/telemetry/database/tests/managementDBtests.py
@@ -0,0 +1,22 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from telemetry.database.managementDB import managementDB
+from telemetry.database.tests.messages import create_collector_model_object
+
+
+def test_add_row_to_db():
+    managementDBobj = managementDB()
+    managementDBobj.add_row_to_db(create_collector_model_object())
\ No newline at end of file
diff --git a/src/telemetry/database/tests/messages.py b/src/telemetry/database/tests/messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..6919eecc62da0794869f334c4de85cb129fbab14
--- /dev/null
+++ b/src/telemetry/database/tests/messages.py
@@ -0,0 +1,80 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import uuid
+import random
+from common.proto import telemetry_frontend_pb2
+from common.proto import kpi_manager_pb2
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+
+
+def create_collector_request():
+    _create_collector_request                                = telemetry_frontend_pb2.Collector()
+    _create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4())
+    _create_collector_request.kpi_id.kpi_id.uuid             = '71d58648-bf47-49ac-996f-e63a9fbfead4' # must be primary key in kpi table
+    # _create_collector_request.kpi_id.kpi_id.uuid             = str(uuid.uuid4())
+    _create_collector_request.duration_s                     = float(random.randint(8, 16))
+    _create_collector_request.interval_s                     = float(random.randint(2, 4))
+    return _create_collector_request
+
+def create_kpi_request():
+    _create_kpi_request                                     = kpi_manager_pb2.KpiDescriptor()
+    _create_kpi_request.kpi_id.kpi_id.uuid                  = str(uuid.uuid4())
+    _create_kpi_request.kpi_description                     = 'KPI Description Test'
+    _create_kpi_request.kpi_sample_type                     = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED
+    _create_kpi_request.service_id.service_uuid.uuid        = 'SERV' 
+    _create_kpi_request.device_id.device_uuid.uuid          = 'DEV'  
+    _create_kpi_request.slice_id.slice_uuid.uuid            = 'SLC'  
+    _create_kpi_request.endpoint_id.endpoint_uuid.uuid      = 'END'  
+    _create_kpi_request.connection_id.connection_uuid.uuid  = 'CON'  
+    # _create_kpi_request.link_id.link_id.uuid                = 'LNK'
+    return _create_kpi_request
+
+def create_kpi_id_request():
+    _create_kpi_id_request             = kpi_manager_pb2.KpiId()
+    _create_kpi_id_request.kpi_id.uuid = '71d58648-bf47-49ac-996f-e63a9fbfead4'
+    return _create_kpi_id_request
+
+def create_collector_id_request():
+    _create_collector_id_request                   = telemetry_frontend_pb2.CollectorId()
+    _create_collector_id_request.collector_id.uuid = '71d58648-bf47-49ac-996f-e63a9fbfead4'
+    return _create_collector_id_request
+
+def create_kpi_filter_request():
+    # Build a dict where each key is a 'KpiModel' column name and each value is the filter to apply.
+    _create_kpi_filter_request                    = dict()
+    _create_kpi_filter_request['kpi_sample_type'] = 102
+    _create_kpi_filter_request['kpi_id']          = '3a17230d-8e95-4afb-8b21-6965481aee5a'
+    return _create_kpi_filter_request
+
+def create_collector_filter_request():
+    # Build a dict where each key is a 'CollectorModel' column name and each value is the filter to apply.
+    _create_collector_filter_request                        = dict()
+    _create_collector_filter_request['sampling_interval_s'] = 3.0
+    # _create_collector_filter_request['kpi_id']              = '11e2c6c6-b507-40aa-ab3a-ffd41e7125f0'
+    return _create_collector_filter_request
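+
+# Both filter dicts above are meant to be expanded as keyword arguments; a
+# sketch using the managementDB helper introduced in this change:
+#     managementDB().select_with_filter(CollectorModel, **create_collector_filter_request())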
+
+def create_collector_model_object():
+    # Create a new Collector instance
+    collector_to_insert                     = CollectorModel()
+    collector_to_insert.collector_id        = str(uuid.uuid4())
+    collector_to_insert.kpi_id              = '3a17230d-8e95-4afb-8b21-6965481aee5a'
+    collector_to_insert.collector           = "Test collector description"
+    collector_to_insert.sampling_duration_s = 15
+    collector_to_insert.sampling_interval_s = 3
+    collector_to_insert.start_timestamp     = time.time()
+    collector_to_insert.end_timestamp       = time.time()
+    return collector_to_insert
\ No newline at end of file
diff --git a/src/telemetry/database/tests/telemetryDBtests.py b/src/telemetry/database/tests/telemetryDBtests.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d221106419d6e4ee4b313adf10c90c5e6be7666
--- /dev/null
+++ b/src/telemetry/database/tests/telemetryDBtests.py
@@ -0,0 +1,86 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Any
+from sqlalchemy.ext.declarative import declarative_base
+from telemetry.database.TelemetryDBmanager import TelemetryDBmanager
+from telemetry.database.TelemetryEngine import TelemetryEngine
+from telemetry.database.tests import temp_DB
+from .messages import create_kpi_request, create_collector_request, \
+                        create_kpi_id_request, create_kpi_filter_request, \
+                        create_collector_id_request, create_collector_filter_request
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+
+# def test_temp_DB():
+#     temp_DB.main()
+
+def test_telemetry_object_creation():
+    LOGGER.info('--- test_telemetry_object_creation: START')
+
+    LOGGER.info('>>> Creating TelemetryDBmanager Object <<< ')
+    TelemetryDBmanagerObj = TelemetryDBmanager()
+    TelemetryEngine.create_database(TelemetryDBmanagerObj.db_engine)        # creates the 'frontend' DB, if it does not exist.
+
+    LOGGER.info('>>> Creating database <<< ')
+    TelemetryDBmanagerObj.create_database()
+
+    LOGGER.info('>>> Verifying database <<< ')
+    TelemetryDBmanagerObj.list_databases()
+
+    # # LOGGER.info('>>> Dropping Tables: ')
+    # # TelemetryDBmanagerObj.drop_table("table_name_here")
+
+    LOGGER.info('>>> Creating Tables <<< ')
+    TelemetryDBmanagerObj.create_tables()
+
+    LOGGER.info('>>> Verifying Table creation <<< ')
+    TelemetryDBmanagerObj.verify_tables()
+
+    # LOGGER.info('>>> TESTING: Row Insertion Operation: kpi Table <<<')
+    # kpi_obj = create_kpi_request()
+    # TelemetryDBmanagerObj.inser_kpi(kpi_obj)
+
+    # LOGGER.info('>>> TESTING: Row Insertion Operation: collector Table <<<')
+    # collector_obj = create_collector_request()
+    # TelemetryDBmanagerObj.insert_collector(collector_obj)
+
+    # LOGGER.info('>>> TESTING: Get KpiDescriptor  <<<')
+    # kpi_id_obj = create_kpi_id_request()
+    # TelemetryDBmanagerObj.get_kpi_descriptor(kpi_id_obj)
+
+    # LOGGER.info('>>> TESTING: Select Collector  <<<')
+    # collector_id_obj = create_collector_id_request()
+    # TelemetryDBmanagerObj.get_collector(collector_id_obj)
+
+    # LOGGER.info('>>> TESTING: Applying kpi filter  <<< ')
+    # kpi_filter : dict[str, Any] = create_kpi_filter_request()
+    # TelemetryDBmanagerObj.select_kpi_descriptor(**kpi_filter)
+
+    # LOGGER.info('>>> TESTING: Applying collector filter   <<<')
+    # collector_filter : dict[str, Any] = create_collector_filter_request()
+    # TelemetryDBmanagerObj.select_collector(**collector_filter)
+    
+    # LOGGER.info('>>> TESTING: Delete KpiDescriptor ')
+    # kpi_id_obj = create_kpi_id_request()
+    # TelemetryDBmanagerObj.delete_kpi_descriptor(kpi_id_obj)
+
+    # LOGGER.info('>>> TESTING: Delete Collector ')
+    # collector_id_obj = create_collector_id_request()
+    # TelemetryDBmanagerObj.delete_collector(collector_id_obj)
+    
\ No newline at end of file
diff --git a/src/telemetry/database/tests/temp_DB.py b/src/telemetry/database/tests/temp_DB.py
new file mode 100644
index 0000000000000000000000000000000000000000..089d3542492c2da87b839416f7118749bb82caad
--- /dev/null
+++ b/src/telemetry/database/tests/temp_DB.py
@@ -0,0 +1,327 @@
+from sqlalchemy import create_engine, Column, String, Integer, Text, Float, ForeignKey
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker, relationship
+from sqlalchemy.orm.exc import NoResultFound
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.dialects.postgresql import UUID
+import logging
+
+LOGGER = logging.getLogger(__name__)
+Base = declarative_base()
+
+class Kpi(Base):
+    __tablename__ = 'kpi'
+
+    kpi_id          = Column(UUID(as_uuid=False), primary_key=True)
+    kpi_description = Column(Text)
+    kpi_sample_type = Column(Integer)
+    device_id       = Column(String)
+    endpoint_id     = Column(String)
+    service_id      = Column(String)
+    slice_id        = Column(String)
+    connection_id   = Column(String)
+    link_id         = Column(String)
+
+    collectors = relationship('Collector', back_populates='kpi')
+
+    def __repr__(self):
+        return (f"<Kpi(kpi_id='{self.kpi_id}', kpi_description='{self.kpi_description}', "
+                f"kpi_sample_type='{self.kpi_sample_type}', device_id='{self.device_id}', "
+                f"endpoint_id='{self.endpoint_id}', service_id='{self.service_id}', "
+                f"slice_id='{self.slice_id}', connection_id='{self.connection_id}', "
+                f"link_id='{self.link_id}')>")
+    
+class Collector(Base):
+    __tablename__ = 'collector'
+
+    collector_id        = Column(UUID(as_uuid=False), primary_key=True)
+    kpi_id              = Column(UUID(as_uuid=False), ForeignKey('kpi.kpi_id'))
+    collector           = Column(String)
+    sampling_duration_s = Column(Float)
+    sampling_interval_s = Column(Float)
+    start_timestamp     = Column(Float)
+    end_timestamp       = Column(Float)
+
+    kpi = relationship('Kpi', back_populates='collectors')
+
+    def __repr__(self):
+        return (f"<Collector(collector_id='{self.collector_id}', kpi_id='{self.kpi_id}', "
+                f"collector='{self.collector}', sampling_duration_s='{self.sampling_duration_s}', "
+                f"sampling_interval_s='{self.sampling_interval_s}', start_timestamp='{self.start_timestamp}', "
+                f"end_timestamp='{self.end_timestamp}')>")
+
+class DatabaseManager:
+    def __init__(self, db_url, db_name):
+        self.engine = create_engine(db_url)
+        self.db_name = db_name
+        self.Session = sessionmaker(bind=self.engine)
+        LOGGER.info("DatabaseManager initialized with DB URL: %s and DB Name: %s", db_url, db_name)
+
+    def create_database(self):
+        try:
+            with self.engine.connect() as connection:
+                connection.execute(f"CREATE DATABASE {self.db_name};")
+            LOGGER.info("Database '%s' created successfully.", self.db_name)
+        except Exception as e:
+            LOGGER.error("Error creating database '%s': %s", self.db_name, e)
+        finally:
+            LOGGER.info("create_database method execution finished.")
+
+    def create_tables(self):
+        try:
+            Base.metadata.create_all(self.engine)
+            LOGGER.info("Tables created successfully.")
+        except Exception as e:
+            LOGGER.error("Error creating tables: %s", e)
+        finally:
+            LOGGER.info("create_tables method execution finished.")
+
+    def verify_table_creation(self):
+        try:
+            with self.engine.connect() as connection:
+                result = connection.execute("SHOW TABLES;")
+                tables = result.fetchall()
+                LOGGER.info("Tables verified: %s", tables)
+                return tables
+        except Exception as e:
+            LOGGER.error("Error verifying table creation: %s", e)
+            return []
+        finally:
+            LOGGER.info("verify_table_creation method execution finished.")
+
+    def insert_row_kpi(self, kpi_data):
+        session = self.Session()
+        try:
+            new_kpi = Kpi(**kpi_data)
+            session.add(new_kpi)
+            session.commit()
+            LOGGER.info("Inserted row into KPI table: %s", kpi_data)
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error inserting row into KPI table: %s", e)
+        finally:
+            session.close()
+            LOGGER.info("insert_row_kpi method execution finished.")
+
+    def insert_row_collector(self, collector_data):
+        session = self.Session()
+        try:
+            new_collector = Collector(**collector_data)
+            session.add(new_collector)
+            session.commit()
+            LOGGER.info("Inserted row into Collector table: %s", collector_data)
+        except Exception as e:
+            session.rollback()
+            LOGGER.error("Error inserting row into Collector table: %s", e)
+        finally:
+            session.close()
+            LOGGER.info("insert_row_collector method execution finished.")
+
+    def verify_insertion_kpi(self, kpi_id):
+        session = self.Session()
+        try:
+            kpi = session.query(Kpi).filter_by(kpi_id=kpi_id).first()
+            LOGGER.info("Verified insertion in KPI table for kpi_id: %s, Result: %s", kpi_id, kpi)
+            return kpi
+        except Exception as e:
+            LOGGER.error("Error verifying insertion in KPI table for kpi_id %s: %s", kpi_id, e)
+            return None
+        finally:
+            session.close()
+            LOGGER.info("verify_insertion_kpi method execution finished.")
+
+    def verify_insertion_collector(self, collector_id):
+        session = self.Session()
+        try:
+            collector = session.query(Collector).filter_by(collector_id=collector_id).first()
+            LOGGER.info("Verified insertion in Collector table for collector_id: %s, Result: %s", collector_id, collector)
+            return collector
+        except Exception as e:
+            LOGGER.error("Error verifying insertion in Collector table for collector_id %s: %s", collector_id, e)
+            return None
+        finally:
+            session.close()
+            LOGGER.info("verify_insertion_collector method execution finished.")
+
+    def get_all_kpi_rows(self):
+        session = self.Session()
+        try:
+            kpi_rows = session.query(Kpi).all()
+            LOGGER.info("Fetched all rows from KPI table: %s", kpi_rows)
+            return kpi_rows
+        except Exception as e:
+            LOGGER.error("Error fetching all rows from KPI table: %s", e)
+            return []
+        finally:
+            session.close()
+            LOGGER.info("get_all_kpi_rows method execution finished.")
+
+    def get_all_collector_rows(self):
+        session = self.Session()
+        try:
+            collector_rows = session.query(Collector).all()
+            LOGGER.info("Fetched all rows from Collector table: %s", collector_rows)
+            return collector_rows
+        except Exception as e:
+            LOGGER.error("Error fetching all rows from Collector table: %s", e)
+            return []
+        finally:
+            session.close()
+            LOGGER.info("get_all_collector_rows method execution finished.")
+
+    def get_filtered_kpi_rows(self, **filters):
+        session = self.Session()
+        try:
+            query = session.query(Kpi)
+            for column, value in filters.items():
+                query = query.filter(getattr(Kpi, column) == value)
+            result = query.all()
+            LOGGER.info("Fetched filtered rows from KPI table with filters ---------- : {:s}".format(str(result)))
+            return result
+        except NoResultFound:
+            LOGGER.warning("No results found in KPI table with filters %s", filters)
+            return []
+        except Exception as e:
+            LOGGER.error("Error fetching filtered rows from KPI table with filters %s: %s", filters, e)
+            return []
+        finally:
+            session.close()
+            LOGGER.info("get_filtered_kpi_rows method execution finished.")
+
+    def get_filtered_collector_rows(self, **filters):
+        session = self.Session()
+        try:
+            query = session.query(Collector)
+            for column, value in filters.items():
+                query = query.filter(getattr(Collector, column) == value)
+            result = query.all()
+            LOGGER.info("Fetched filtered rows from Collector table with filters %s: %s", filters, result)
+            return result
+        except NoResultFound:
+            LOGGER.warning("No results found in Collector table with filters %s", filters)
+            return []
+        except Exception as e:
+            LOGGER.error("Error fetching filtered rows from Collector table with filters %s: %s", filters, e)
+            return []
+        finally:
+            session.close()
+            LOGGER.info("get_filtered_collector_rows method execution finished.")
+
+    def delete_kpi_by_id(self, kpi_id):
+        session = self.Session()
+        try:
+            kpi = session.query(Kpi).filter_by(kpi_id=kpi_id).first()
+            if kpi:
+                session.delete(kpi)
+                session.commit()
+                LOGGER.info("Deleted KPI with kpi_id: %s", kpi_id)
+            else:
+                LOGGER.warning("KPI with kpi_id %s not found", kpi_id)
+        except SQLAlchemyError as e:
+            session.rollback()
+            LOGGER.error("Error deleting KPI with kpi_id %s: %s", kpi_id, e)
+        finally:
+            session.close()
+            LOGGER.info("delete_kpi_by_id method execution finished.")
+
+    def delete_collector_by_id(self, collector_id):
+        session = self.Session()
+        try:
+            collector = session.query(Collector).filter_by(collector_id=collector_id).first()
+            if collector:
+                session.delete(collector)
+                session.commit()
+                LOGGER.info("Deleted Collector with collector_id: %s", collector_id)
+            else:
+                LOGGER.warning("Collector with collector_id %s not found", collector_id)
+        except SQLAlchemyError as e:
+            session.rollback()
+            LOGGER.error("Error deleting Collector with collector_id %s: %s", collector_id, e)
+        finally:
+            session.close()
+            LOGGER.info("delete_collector_by_id method execution finished.")
+
+
+# Example Usage
+def main():
+    CRDB_SQL_PORT  = "26257"
+    CRDB_DATABASE  = "telemetryfrontend"
+    CRDB_USERNAME  = "tfs"
+    CRDB_PASSWORD  = "tfs123"
+    CRDB_SSLMODE   = "require"    
+    CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@127.0.0.1:{:s}/{:s}?sslmode={:s}'
+    crdb_uri = CRDB_URI_TEMPLATE.format(
+            CRDB_USERNAME, CRDB_PASSWORD, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE)
+    # db_url = "cockroachdb://username:password@localhost:26257/"
+    # db_name = "yourdatabase"
+    db_manager = DatabaseManager(crdb_uri, CRDB_DATABASE)
+
+    # Create database
+    db_manager.create_database()
+
+    # Re-create the engine and session factory now that the database exists
+    db_manager.engine = create_engine(crdb_uri)
+    db_manager.Session = sessionmaker(bind=db_manager.engine)
+
+    # Create tables
+    db_manager.create_tables()
+
+    # Verify table creation
+    tables = db_manager.verify_table_creation()
+    LOGGER.info('Tables in the database: {:s}'.format(str(tables)))    
+
+    # Insert a row into the KPI table
+    kpi_data = {
+        'kpi_id': '123e4567-e89b-12d3-a456-426614174000',
+        'kpi_description': 'Sample KPI',
+        'kpi_sample_type': 1,
+        'device_id': 'device_1',
+        'endpoint_id': 'endpoint_1',
+        'service_id': 'service_1',
+        'slice_id': 'slice_1',
+        'connection_id': 'conn_1',
+        'link_id': 'link_1'
+    }
+    db_manager.insert_row_kpi(kpi_data)
+
+    # Insert a row into the Collector table
+    collector_data = {
+        'collector_id': '123e4567-e89b-12d3-a456-426614174001',
+        'kpi_id': '123e4567-e89b-12d3-a456-426614174000',
+        'collector': 'Collector 1',
+        'sampling_duration_s': 60.0,
+        'sampling_interval_s': 10.0,
+        'start_timestamp': 1625247600.0,
+        'end_timestamp': 1625247660.0
+    }
+    db_manager.insert_row_collector(collector_data)
+
+    # Verify insertion into KPI table
+    kpi = db_manager.verify_insertion_kpi('123e4567-e89b-12d3-a456-426614174000')
+    print("Inserted KPI:", kpi)
+
+    # Verify insertion into Collector table
+    collector = db_manager.verify_insertion_collector('123e4567-e89b-12d3-a456-426614174001')
+    print("Inserted Collector:", collector)
+
+    # Get all rows from KPI table
+    all_kpi_rows = db_manager.get_all_kpi_rows()
+    LOGGER.info("All KPI Rows: %s", all_kpi_rows)
+
+    # Get all rows from Collector table
+    all_collector_rows = db_manager.get_all_collector_rows()
+    LOGGER.info("All Collector Rows: %s", all_collector_rows)
+
+    # Get filtered rows from KPI table
+    filtered_kpi_rows = db_manager.get_filtered_kpi_rows(kpi_description='Sample KPI')
+    LOGGER.info("Filtered KPI Rows: %s", filtered_kpi_rows)
+
+    # Get filtered rows from Collector table
+    filtered_collector_rows = db_manager.get_filtered_collector_rows(collector='Collector 1')
+    LOGGER.info("Filtered Collector Rows: %s", filtered_collector_rows)
+
+    # Delete a Collector by collector_id (delete the child row first to satisfy
+    # the foreign-key constraint on kpi_id)
+    collector_id_to_delete = '123e4567-e89b-12d3-a456-426614174001'
+    db_manager.delete_collector_by_id(collector_id_to_delete)
+
+    # Delete a KPI by kpi_id
+    kpi_id_to_delete = '123e4567-e89b-12d3-a456-426614174000'
+    db_manager.delete_kpi_by_id(kpi_id_to_delete)
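+
+# Optional entry point so this temp script can also be run standalone with
+# visible log output (the tests import and call main() directly).
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO)
+    main()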
diff --git a/src/telemetry/frontend/__init__.py b/src/telemetry/frontend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..234a1af6588c91f6a17f3963f69120cd6e2248d9
--- /dev/null
+++ b/src/telemetry/frontend/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/frontend/client/TelemetryFrontendClient.py b/src/telemetry/frontend/client/TelemetryFrontendClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd36ecd45933ad10758e408cf03c1bf834d27ba6
--- /dev/null
+++ b/src/telemetry/frontend/client/TelemetryFrontendClient.py
@@ -0,0 +1,70 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+
+from common.proto.context_pb2 import Empty
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.proto.telemetry_frontend_pb2_grpc import TelemetryFrontendServiceStub
+from common.proto.telemetry_frontend_pb2 import Collector, CollectorId, CollectorFilter, CollectorList
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 10
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class TelemetryFrontendClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.TELEMETRYFRONTEND)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')        
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = TelemetryFrontendServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def StartCollector(self, request : Collector) -> CollectorId: # type: ignore
+        LOGGER.debug('StartCollector: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.StartCollector(request)
+        LOGGER.debug('StartCollector result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def StopCollector(self, request : CollectorId) -> Empty: # type: ignore
+        LOGGER.debug('StopCollector: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.StopCollector(request)
+        LOGGER.debug('StopCollector result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def SelectCollectors(self, request : CollectorFilter) -> CollectorList: # type: ignore
+        LOGGER.debug('SelectCollectors: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SelectCollectors(request)
+        LOGGER.debug('SelectCollectors result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
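+# Minimal usage sketch (illustrative only; assumes a reachable TelemetryFrontend
+# gRPC endpoint and reuses the request helper from the tests):
+#
+#     from telemetry.frontend.tests.Messages import create_collector_request
+#     client = TelemetryFrontendClient(host='127.0.0.1', port=20050)
+#     collector_id = client.StartCollector(create_collector_request())
+#     client.StopCollector(collector_id)
+#     client.close()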
diff --git a/src/telemetry/frontend/client/__init__.py b/src/telemetry/frontend/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/telemetry/frontend/client/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/frontend/service/TelemetryFrontendService.py b/src/telemetry/frontend/service/TelemetryFrontendService.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc3f8df363a882db0f0ba3112a38f3bba3921c30
--- /dev/null
+++ b/src/telemetry/frontend/service/TelemetryFrontendService.py
@@ -0,0 +1,30 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from monitoring.service.NameMapping import NameMapping
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.proto.telemetry_frontend_pb2_grpc import add_TelemetryFrontendServiceServicer_to_server
+from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl
+
+
+class TelemetryFrontendService(GenericGrpcService):
+    def __init__(self, name_mapping : NameMapping, cls_name: str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND)
+        super().__init__(port, cls_name=cls_name)
+        self.telemetry_frontend_servicer = TelemetryFrontendServiceServicerImpl(name_mapping)
+
+    def install_servicers(self):
+        add_TelemetryFrontendServiceServicer_to_server(self.telemetry_frontend_servicer, self.server)
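+
+# Note: GenericGrpcService is expected to invoke install_servicers() as part of
+# start() (see the service bootstrap in __main__.py), so no explicit
+# registration call is needed here.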
diff --git a/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6830ad676d3934c88b01575ebdd1d0549fb00d1
--- /dev/null
+++ b/src/telemetry/frontend/service/TelemetryFrontendServiceServicerImpl.py
@@ -0,0 +1,204 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ast
+import threading
+import time
+from typing import Tuple, Any
+import grpc
+import logging
+
+from confluent_kafka import Consumer as KafkaConsumer
+from common.proto.context_pb2 import Empty
+from monitoring.service.NameMapping import NameMapping
+from confluent_kafka import Producer as KafkaProducer
+from confluent_kafka import KafkaException
+from confluent_kafka import KafkaError
+from common.proto.telemetry_frontend_pb2 import CollectorId, Collector, CollectorFilter, CollectorList
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.proto.telemetry_frontend_pb2_grpc import TelemetryFrontendServiceServicer
+
+from telemetry.database.TelemetryModel import Collector as CollectorModel
+from telemetry.database.managementDB import managementDB
+
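+# Module-level configuration: KAFKA_SERVER_IP assumes a local single-broker
+# Kafka deployment; ACTIVE_COLLECTORS tracks the IDs of collectors started by
+# this frontend so that responses can be matched against them; KAFKA_TOPICS
+# names the request/response topics shared with the telemetry backend.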
+LOGGER            = logging.getLogger(__name__)
+METRICS_POOL      = MetricsPool('Monitoring', 'TelemetryFrontend')
+KAFKA_SERVER_IP   = '127.0.0.1:9092'
+ACTIVE_COLLECTORS = []
+KAFKA_TOPICS      = {'request' : 'topic_request', 
+                     'response': 'topic_response'}
+
+
+class TelemetryFrontendServiceServicerImpl(TelemetryFrontendServiceServicer):
+    def __init__(self, name_mapping : NameMapping):
+        LOGGER.info('Init TelemetryFrontendService')
+        self.managementDBobj = managementDB()
+        self.kafka_producer = KafkaProducer({'bootstrap.servers': KAFKA_SERVER_IP,})
+        self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KAFKA_SERVER_IP,
+                                            'group.id'          : 'frontend',
+                                            'auto.offset.reset' : 'latest'})
+
+    def add_collector_to_db(self, request: Collector ): # type: ignore
+        try:
+            # Create a new Collector instance
+            collector_to_insert                     = CollectorModel()
+            collector_to_insert.collector_id        = request.collector_id.collector_id.uuid
+            collector_to_insert.kpi_id              = request.kpi_id.kpi_id.uuid
+            # collector_to_insert.collector_description = request.collector
+            collector_to_insert.sampling_duration_s = request.duration_s
+            collector_to_insert.sampling_interval_s = request.interval_s
+            collector_to_insert.start_timestamp     = time.time()
+            collector_to_insert.end_timestamp       = time.time()
+            self.managementDBobj.add_row_to_db(collector_to_insert)
+        except Exception as e:
+            LOGGER.info("Unable to create collectorModel class object. {:}".format(e))
+
+    # @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StartCollector(self, 
+                       request : Collector, grpc_context: grpc.ServicerContext # type: ignore
+                      ) -> CollectorId: # type: ignore
+        # push info to frontend db
+        LOGGER.info ("gRPC message: {:}".format(request))
+        response = CollectorId()
+        _collector_id       = str(request.collector_id.collector_id.uuid)
+        _collector_kpi_id   = str(request.kpi_id.kpi_id.uuid)
+        _collector_duration = int(request.duration_s)
+        _collector_interval = int(request.interval_s)
+        # pushing Collector to DB
+        self.add_collector_to_db(request)
+        self.publish_to_kafka_request_topic(_collector_id, _collector_kpi_id, _collector_duration, _collector_interval)
+        # self.run_publish_to_kafka_request_topic(_collector_id, _collector_kpi_id, _collector_duration, _collector_interval)
+        response.collector_id.uuid = request.collector_id.collector_id.uuid # type: ignore
+        return response
+    
+    def run_publish_to_kafka_request_topic(self, msg_key: str, kpi: str, duration : int, interval: int):
+        # Add the threading.Thread() handle to a dictionary and call start() in the next statement
+        threading.Thread(target=self.publish_to_kafka_request_topic, args=(msg_key, kpi, duration, interval)).start()
+
+    def publish_to_kafka_request_topic(self, 
+                             collector_id: str, kpi: str, duration : int, interval: int
+                             ):
+        """
+        Method to generate collector request to Kafka topic.
+        """
+        # time.sleep(5)
+        # producer_configs = {
+        #     'bootstrap.servers': KAFKA_SERVER_IP,
+        # }
+        # topic_request = "topic_request"
+        msg_value : Tuple [str, int, int] = (kpi, duration, interval)
+        # print ("Request generated: ", "Colletcor Id: ", collector_id, \
+        #         ", \nKPI: ", kpi, ", Duration: ", duration, ", Interval: ", interval)
+        # producerObj = KafkaProducer(producer_configs)
+        self.kafka_producer.produce(KAFKA_TOPICS['request'], key=collector_id, value= str(msg_value), callback=self.delivery_callback)
+        # producerObj.produce(KAFKA_TOPICS['request'], key=collector_id, value= str(msg_value), callback=self.delivery_callback)
+        LOGGER.info("Collector Request Generated: {:}, {:}, {:}, {:}".format(collector_id, kpi, duration, interval))
+        # producerObj.produce(topic_request, key=collector_id, value= str(msg_value), callback=self.delivery_callback)
+        ACTIVE_COLLECTORS.append(collector_id)
+        self.kafka_producer.flush()
+
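+    # Wire-format note (grounded in the code above and in kafka_listener below):
+    # the request message key is the collector UUID and the value is
+    # str((kpi_id, duration_s, interval_s)), parsed back with ast.literal_eval;
+    # StopCollector reuses the same topic with duration and interval set to -1.
+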
+    def run_kafka_listener(self):
+        # print ("--- STARTED: run_kafka_listener ---")
+        threading.Thread(target=self.kafka_listener).start()
+        return True
+
+    def kafka_listener(self):
+        """
+        listener for response on Kafka topic.
+        """
+        # # print ("--- STARTED: kafka_listener ---")
+        # conusmer_configs = {
+        #     'bootstrap.servers' : KAFKA_SERVER_IP,
+        #     'group.id'          : 'frontend',
+        #     'auto.offset.reset' : 'latest'
+        # }
+        # # topic_response = "topic_response"
+
+        # consumerObj = KafkaConsumer(conusmer_configs)
+        self.kafka_consumer.subscribe([KAFKA_TOPICS['response']])
+        # print (time.time())
+        while True:
+            receive_msg = self.kafka_consumer.poll(2.0)
+            if receive_msg is None:
+                # print (" - Telemetry frontend listening on Kafka Topic: ", KAFKA_TOPICS['response'])     # added for debugging purposes
+                continue
+            elif receive_msg.error():
+                if receive_msg.error().code() == KafkaError._PARTITION_EOF:
+                    continue
+                else:
+                    print("Consumer error: {}".format(receive_msg.error()))
+                    break
+            try:
+                collector_id = receive_msg.key().decode('utf-8')
+                if collector_id in ACTIVE_COLLECTORS:
+                    (kpi_id, kpi_value) = ast.literal_eval(receive_msg.value().decode('utf-8'))
+                    self.process_response(collector_id, kpi_id, kpi_value)
+                else:
+                    print(f"collector id does not match.\nRespone ID: '{collector_id}' --- Active IDs: '{ACTIVE_COLLECTORS}' ")
+            except Exception as e:
+                print(f"No message key found: {str(e)}")
+                continue
+                # return None
+
+    def process_response(self, collector_id: str, kpi_id: str, kpi_value: Any):
+        if kpi_id == "-1" and kpi_value == -1:
+            # LOGGER.info("Sucessfully terminated Collector: {:}".format(collector_id))
+            print ("Sucessfully terminated Collector: ", collector_id)
+        else:
+            print ("Frontend-Received values Collector Id:", collector_id, "-KPI:", kpi_id, "-VALUE:", kpi_value)
+
+    def delivery_callback(self, err, msg):
+        """
+        Callback function to handle message delivery status.
+        Args:
+            err (KafkaError): Kafka error object.
+            msg (Message): Kafka message object.
+        """
+        if err:
+            print(f'Message delivery failed: {err}')
+        else:
+            print(f'Message delivered to topic {msg.topic()}')
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def StopCollector(self, 
+                      request : CollectorId, grpc_context: grpc.ServicerContext # type: ignore
+                     ) -> Empty:  # type: ignore
+        LOGGER.info ("gRPC message: {:}".format(request))
+        _collector_id = request.collector_id.uuid
+        self.publish_to_kafka_request_topic(_collector_id, "", -1, -1)
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SelectCollectors(self, 
+                         request : CollectorFilter, contextgrpc_context: grpc.ServicerContext # type: ignore
+                        ) -> CollectorList:  # type: ignore
+        LOGGER.info("gRPC message: {:}".format(request))
+        response = CollectorList()
+        filter_to_apply = dict()
+        filter_to_apply['kpi_id']       = request.kpi_id[0].kpi_id.uuid
+        # filter_to_apply['duration_s'] = request.duration_s[0]
+        try:
+            rows = self.managementDBobj.select_with_filter(CollectorModel, **filter_to_apply)
+        except Exception as e:
+            LOGGER.error('Unable to apply collector filter. {:}'.format(e))
+            rows = []
+        try:
+            for row in rows:
+                collector_obj = Collector()
+                collector_obj.collector_id.collector_id.uuid = row.collector_id
+                response.collector_list.append(collector_obj)
+            return response
+        except Exception as e:
+            LOGGER.error('Unable to process response. {:}'.format(e))
+            return response
\ No newline at end of file
diff --git a/src/telemetry/frontend/service/__init__.py b/src/telemetry/frontend/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/telemetry/frontend/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/frontend/service/__main__.py b/src/telemetry/frontend/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b0263706c3dad3756306d1ba8a3a104d568cd6f
--- /dev/null
+++ b/src/telemetry/frontend/service/__main__.py
@@ -0,0 +1,72 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import signal
+import sys
+import logging, threading
+from prometheus_client import start_http_server
+from monitoring.service.NameMapping import NameMapping
+from .TelemetryFrontendService import TelemetryFrontendService
+from monitoring.service.EventTools import EventsDeviceCollector
+from common.Settings import (
+    get_log_level, wait_for_environment_variables, get_env_var_name, 
+    get_metrics_port )
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+    LOGGER = logging.getLogger(__name__)
+
+# ------- will be added later --------------
+    # wait_for_environment_variables([
+    #     get_env_var_name
+
+
+    # ])
+# ------- will be added later --------------
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
+
+    name_mapping = NameMapping()
+
+    grpc_service = TelemetryFrontendService(name_mapping)
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=1.0): pass
+
+    LOGGER.info('Terminating...')
+    grpc_service.stop()
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
\ No newline at end of file
diff --git a/src/telemetry/frontend/tests/Messages.py b/src/telemetry/frontend/tests/Messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..1205898d13a610cd262979242e4f489f5e35cdb8
--- /dev/null
+++ b/src/telemetry/frontend/tests/Messages.py
@@ -0,0 +1,83 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+import random
+from common.proto import telemetry_frontend_pb2
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+
+
+# ----------------------- "2nd" Iteration --------------------------------
+def create_collector_id():
+    _collector_id                   = telemetry_frontend_pb2.CollectorId()
+    _collector_id.collector_id.uuid = str(uuid.uuid4())
+    return _collector_id
+
+# def create_collector_id_a(coll_id_str : str):
+#     _collector_id                   = telemetry_frontend_pb2.CollectorId()
+#     _collector_id.collector_id.uuid = str(coll_id_str)
+#     return _collector_id
+
+def create_collector_request():
+    _create_collector_request                                = telemetry_frontend_pb2.Collector()
+    _create_collector_request.collector_id.collector_id.uuid = str(uuid.uuid4())
+    _create_collector_request.kpi_id.kpi_id.uuid             = "165d20c5-a446-42fa-812f-e2b7ed283c6f"
+    # _create_collector_request.collector                      = "collector description"
+    _create_collector_request.duration_s                     = float(random.randint(8, 16))
+    _create_collector_request.interval_s                     = float(random.randint(2, 4))
+    return _create_collector_request
+
+def create_collector_filter():
+    _create_collector_filter = telemetry_frontend_pb2.CollectorFilter()
+    new_kpi_id               = _create_collector_filter.kpi_id.add()
+    new_kpi_id.kpi_id.uuid   = "165d20c5-a446-42fa-812f-e2b7ed283c6f"
+    return _create_collector_filter
+
+# ----------------------- "First" Iteration --------------------------------
+# def create_collector_request_a():
+#     _create_collector_request_a                                = telemetry_frontend_pb2.Collector()
+#     _create_collector_request_a.collector_id.collector_id.uuid = "-1"
+#     return _create_collector_request_a
+
+# def create_collector_request_b(str_kpi_id, coll_duration_s, coll_interval_s
+#                                ) -> telemetry_frontend_pb2.Collector:
+#     _create_collector_request_b                                = telemetry_frontend_pb2.Collector()
+#     _create_collector_request_b.collector_id.collector_id.uuid = '1'
+#     _create_collector_request_b.kpi_id.kpi_id.uuid             = str_kpi_id
+#     _create_collector_request_b.duration_s                     = coll_duration_s
+#     _create_collector_request_b.interval_s                     = coll_interval_s
+#     return _create_collector_request_b
+
+# def create_collector_filter():
+#     _create_collector_filter = telemetry_frontend_pb2.CollectorFilter()
+#     new_collector_id                       = _create_collector_filter.collector_id.add()
+#     new_collector_id.collector_id.uuid     = "COLL1"
+#     new_kpi_id                             = _create_collector_filter.kpi_id.add()
+#     new_kpi_id.kpi_id.uuid                 = "KPI1"
+#     new_device_id                          = _create_collector_filter.device_id.add()
+#     new_device_id.device_uuid.uuid         = 'DEV1'
+#     new_service_id                         = _create_collector_filter.service_id.add()
+#     new_service_id.service_uuid.uuid       = 'SERV1'
+#     new_slice_id                           = _create_collector_filter.slice_id.add()
+#     new_slice_id.slice_uuid.uuid           = 'SLC1'
+#     new_endpoint_id                        = _create_collector_filter.endpoint_id.add()
+#     new_endpoint_id.endpoint_uuid.uuid     = 'END1'
+#     new_connection_id                      = _create_collector_filter.connection_id.add()
+#     new_connection_id.connection_uuid.uuid = 'CON1'
+#     _create_collector_filter.kpi_sample_type.append(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED)
+#     return _create_collector_filter
+
+# def create_collector_list():
+#     _create_collector_list = telemetry_frontend_pb2.CollectorList()
+#     return _create_collector_list
\ No newline at end of file
diff --git a/src/telemetry/frontend/tests/__init__.py b/src/telemetry/frontend/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/telemetry/frontend/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/telemetry/frontend/tests/test_frontend.py b/src/telemetry/frontend/tests/test_frontend.py
new file mode 100644
index 0000000000000000000000000000000000000000..002cc430721845aa5aa18274375e2c22b5d77ff7
--- /dev/null
+++ b/src/telemetry/frontend/tests/test_frontend.py
@@ -0,0 +1,204 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+import pytest
+import logging
+from typing import Union
+
+from common.proto.context_pb2 import Empty
+from common.Constants import ServiceNameEnum
+from common.proto.telemetry_frontend_pb2 import CollectorId, CollectorList
+from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+from context.client.ContextClient import ContextClient
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
+from common.Settings import ( 
+    get_service_port_grpc, get_env_var_name, ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC)
+
+from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient
+from telemetry.frontend.service.TelemetryFrontendService import TelemetryFrontendService
+from telemetry.frontend.service.TelemetryFrontendServiceServicerImpl import TelemetryFrontendServiceServicerImpl
+from telemetry.frontend.tests.Messages import ( create_collector_request, create_collector_filter)
+from telemetry.database.managementDB import managementDB
+from telemetry.database.TelemetryEngine import TelemetryEngine
+
+from device.client.DeviceClient import DeviceClient
+from device.service.DeviceService import DeviceService
+from device.service.driver_api.DriverFactory import DriverFactory
+from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
+
+from monitoring.service.NameMapping import NameMapping
+
+os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE'
+from device.service.drivers import DRIVERS
+
+###########################
+# Tests Setup
+###########################
+
+LOCAL_HOST = '127.0.0.1'
+MOCKSERVICE_PORT = 10000
+
+TELEMETRY_FRONTEND_PORT = MOCKSERVICE_PORT + int(get_service_port_grpc(ServiceNameEnum.TELEMETRYFRONTEND))
+os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.TELEMETRYFRONTEND, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(TELEMETRY_FRONTEND_PORT)
+
+LOGGER = logging.getLogger(__name__)
+
+class MockContextService(GenericGrpcService):
+    # Mock Service implementing Context to simplify unit tests of the Telemetry Frontend
+
+    def __init__(self, bind_port: Union[str, int]) -> None:
+        super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService')
+
+    # pylint: disable=attribute-defined-outside-init
+    def install_servicers(self):
+        self.context_servicer = MockServicerImpl_Context()
+        add_ContextServiceServicer_to_server(self.context_servicer, self.server)
+
+@pytest.fixture(scope='session')
+def context_service():
+    LOGGER.info('Initializing MockContextService...')
+    _service = MockContextService(MOCKSERVICE_PORT)
+    _service.start()
+    
+    LOGGER.info('Yielding MockContextService...')
+    yield _service
+
+    LOGGER.info('Terminating MockContextService...')
+    _service.context_servicer.msg_broker.terminate()
+    _service.stop()
+
+    LOGGER.info('Terminated MockContextService...')
+
+@pytest.fixture(scope='session')
+def context_client(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
+    LOGGER.info('Initializing ContextClient...')
+    _client = ContextClient()
+    
+    LOGGER.info('Yielding ContextClient...')
+    yield _client
+
+    LOGGER.info('Closing ContextClient...')
+    _client.close()
+
+    LOGGER.info('Closed ContextClient...')
+
+@pytest.fixture(scope='session')
+def device_service(context_service : MockContextService): # pylint: disable=redefined-outer-name,unused-argument
+    LOGGER.info('Initializing DeviceService...')
+    driver_factory = DriverFactory(DRIVERS)
+    driver_instance_cache = DriverInstanceCache(driver_factory)
+    _service = DeviceService(driver_instance_cache)
+    _service.start()
+
+    # yield the server, when test finishes, execution will resume to stop it
+    LOGGER.info('Yielding DeviceService...')
+    yield _service
+
+    LOGGER.info('Terminating DeviceService...')
+    _service.stop()
+
+    LOGGER.info('Terminated DeviceService...')
+
+@pytest.fixture(scope='session')
+def device_client(device_service : DeviceService): # pylint: disable=redefined-outer-name,unused-argument
+    LOGGER.info('Initializing DeviceClient...')
+    _client = DeviceClient()
+
+    LOGGER.info('Yielding DeviceClient...')
+    yield _client
+
+    LOGGER.info('Closing DeviceClient...')
+    _client.close()
+
+    LOGGER.info('Closed DeviceClient...')
+
+@pytest.fixture(scope='session')
+def telemetryFrontend_service(
+        context_service : MockContextService,
+        device_service  : DeviceService
+    ):
+    LOGGER.info('Initializing TelemetryFrontendService...')
+    name_mapping = NameMapping()
+
+    _service = TelemetryFrontendService(name_mapping)
+    _service.start()
+
+    # yield the server, when test finishes, execution will resume to stop it
+    LOGGER.info('Yielding TelemetryFrontendService...')
+    yield _service
+
+    LOGGER.info('Terminating TelemetryFrontendService...')
+    _service.stop()
+
+    LOGGER.info('Terminated TelemetryFrontendService...')
+
+@pytest.fixture(scope='session')
+def telemetryFrontend_client(
+        telemetryFrontend_service : TelemetryFrontendService
+    ):
+    LOGGER.info('Initializing TelemetryFrontendClient...')
+    _client = TelemetryFrontendClient()
+
+    # yield the server, when test finishes, execution will resume to stop it
+    LOGGER.info('Yielding TelemetryFrontendClient...')
+    yield _client
+
+    LOGGER.info('Closing TelemetryFrontendClient...')
+    _client.close()
+
+    LOGGER.info('Closed TelemetryFrontendClient...')
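+
+# Fixture dependency chain (all session-scoped):
+#   context_service -> device_service -> telemetryFrontend_service -> telemetryFrontend_client
+# pytest starts them in that order and tears them down in reverse once the session ends.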
+
+
+###########################
+# Tests Implementation of Telemetry Frontend
+###########################
+
+def test_verify_db_and_table():
+    LOGGER.info(' >>> test_verify_db_and_table START: <<< ')
+    _engine = TelemetryEngine.get_engine()
+    managementDB.create_database(_engine)
+    managementDB.create_tables(_engine)
+
+def test_StartCollector(telemetryFrontend_client):
+    LOGGER.info(' >>> test_StartCollector START: <<< ')
+    response = telemetryFrontend_client.StartCollector(create_collector_request())
+    LOGGER.debug(str(response))
+    assert isinstance(response, CollectorId)
+
+def test_run_kafka_listener():
+    LOGGER.info(' >>> test_run_kafka_listener START: <<< ')
+    name_mapping = NameMapping()
+    telemetry_frontend_servicer = TelemetryFrontendServiceServicerImpl(name_mapping)
+    response = telemetry_frontend_servicer.run_kafka_listener()     # NOTE: "run_kafka_listener" is not defined in frontend.proto; it is a local helper of the servicer
+    LOGGER.debug(str(response))
+    assert isinstance(response, bool)
+
+def test_StopCollector(telemetryFrontend_client):
+    LOGGER.info(' >>> test_StopCollector START: <<< ')
+    _collector_id = telemetryFrontend_client.StartCollector(create_collector_request())
+    time.sleep(3)   # wait briefly before calling StopCollector()
+    response = telemetryFrontend_client.StopCollector(_collector_id)
+    LOGGER.debug(str(response))
+    assert isinstance(response, Empty)
+
+def test_select_collectors(telemetryFrontend_client):
+    LOGGER.info(' >>> test_SelectCollectors START: <<< ')
+    response = telemetryFrontend_client.SelectCollectors(create_collector_filter())
+    LOGGER.info('Received rows after applying filter: {:s}'.format(str(response)))
+    LOGGER.debug(str(response))
+    assert isinstance(response, CollectorList)
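+
+# Illustrative sketch (commented out; not part of the suite): a start/select/stop
+# round-trip reusing only the helpers exercised above. The assumption that a collector
+# started just before is already visible to SelectCollectors() is hypothetical and
+# depends on the backend commit timing.
+#def test_collector_round_trip(telemetryFrontend_client):
+#    collector_id = telemetryFrontend_client.StartCollector(create_collector_request())
+#    assert isinstance(collector_id, CollectorId)
+#    collector_list = telemetryFrontend_client.SelectCollectors(create_collector_filter())
+#    assert isinstance(collector_list, CollectorList)
+#    assert isinstance(telemetryFrontend_client.StopCollector(collector_id), Empty)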
\ No newline at end of file
diff --git a/src/telemetry/requirements.in b/src/telemetry/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..a0e78d2bfb7270b9664ad5ba810e2f213d887bf7
--- /dev/null
+++ b/src/telemetry/requirements.in
@@ -0,0 +1,24 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+anytree==2.8.0
+APScheduler==3.10.1
+influx-line-protocol==0.1.4
+psycopg2-binary==2.9.3
+python-dateutil==2.8.2
+python-json-logger==2.0.2
+pytz==2024.1
+questdb==1.0.1
+requests==2.27.1
+xmltodict==0.12.0
\ No newline at end of file
diff --git a/src/telemetry/telemetry_virenv.txt b/src/telemetry/telemetry_virenv.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e39f80b6593d6c41411751cdd0ea59ee05344570
--- /dev/null
+++ b/src/telemetry/telemetry_virenv.txt
@@ -0,0 +1,49 @@
+anytree==2.8.0
+APScheduler==3.10.1
+attrs==23.2.0
+certifi==2024.2.2
+charset-normalizer==2.0.12
+colorama==0.4.6
+confluent-kafka==2.3.0
+coverage==6.3
+future-fstrings==1.2.0
+greenlet==3.0.3
+grpcio==1.47.5
+grpcio-health-checking==1.47.5
+grpcio-tools==1.47.5
+grpclib==0.4.4
+h2==4.1.0
+hpack==4.0.0
+hyperframe==6.0.1
+idna==3.7
+influx-line-protocol==0.1.4
+iniconfig==2.0.0
+kafka-python==2.0.2
+multidict==6.0.5
+networkx==3.3
+packaging==24.0
+pluggy==1.5.0
+prettytable==3.5.0
+prometheus-client==0.13.0
+protobuf==3.20.3
+psycopg2-binary==2.9.3
+py==1.11.0
+py-cpuinfo==9.0.0
+pytest==6.2.5
+pytest-benchmark==3.4.1
+pytest-depends==1.0.1
+python-dateutil==2.8.2
+python-json-logger==2.0.2
+pytz==2024.1
+questdb==1.0.1
+requests==2.27.1
+six==1.16.0
+SQLAlchemy==1.4.52
+sqlalchemy-cockroachdb==1.4.4
+SQLAlchemy-Utils==0.38.3
+toml==0.10.2
+typing_extensions==4.12.0
+tzlocal==5.2
+urllib3==1.26.18
+wcwidth==0.2.13
+xmltodict==0.12.0
diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py
index 9b6950aeb0f3fda630db87d5d80ad2bf730fac3d..b864d3549e051b54e888c80547724da14fec5f67 100644
--- a/src/webui/service/__init__.py
+++ b/src/webui/service/__init__.py
@@ -49,7 +49,10 @@ def json_to_list(json_str : str) -> List[Union[str, Tuple[str, str]]]:
     if isinstance(data, dict):
         return [('kv', (key, value)) for key, value in data.items()]
     elif isinstance(data, list):
-        return [('item', ', '.join(data))]
+        if len(data) == 1 and isinstance(data[0], dict):
+            return [('kv', (key, value)) for key, value in data[0].items()]
+        else:
+            return [('item', ', '.join([str(d) for d in data]))]
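+        # Worked examples (derived from the branches above):
+        #   '["a", "b"]'   -> [('item', 'a, b')]
+        #   '[{"k": "v"}]' -> [('kv', ('k', 'v'))]  (a single-dict list is unwrapped)
+        #   '[1, 2]'       -> [('item', '1, 2')]    (non-string items are stringified)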
     else:
         return [('item', str(data))]
 
diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py
index fb930024d9fc3ff0b70b77345041e33dcb0d28c2..b7fdb78e85dc634627de02947c0861a7f13bdae9 100644
--- a/src/webui/service/device/routes.py
+++ b/src/webui/service/device/routes.py
@@ -165,6 +165,16 @@ def inventory(device_uuid: str):
     context_client.close()
     return render_template('device/inventory.html', device=device_obj)
 
+@device.route('logical/<path:device_uuid>', methods=['GET', 'POST'])
+def logical(device_uuid: str):
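+    # Logical-device view: renders templates/device/logical.html, which organizes the
+    # device's config rules into ACL / routing-policy / VRF / interface trees.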
+    context_client.connect()
+    device_obj = get_device(context_client, device_uuid, rw_copy=False)
+    if device_obj is None:
+        flash('Device({:s}) not found'.format(str(device_uuid)), 'danger')
+        device_obj = Device()
+    context_client.close()
+    return render_template('device/logical.html', device=device_obj)
+
 @device.get('<path:device_uuid>/delete')
 def delete(device_uuid):
     try:
diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html
index 66e188465994a47f173dcca93237b46cd86adb16..c154346204a4ad59eec54a7e9ae3956a7f3db655 100644
--- a/src/webui/service/templates/base.html
+++ b/src/webui/service/templates/base.html
@@ -156,7 +156,7 @@
           <div class="container">
             <div class="row">
               <div class="col-md-12">
-                <p class="text-center" style="color: white;">&copy; 2022-2023 <a href="https://tfs.etsi.org/">ETSI TeraFlowSDN (TFS) OSG</a></p>
+                <p class="text-center" style="color: white;">&copy; 2022-2024 <a href="https://tfs.etsi.org/">ETSI OSG/SDG TeraFlowSDN (TFS)</a></p>
               </div>
             </div>
             <div class="row">
diff --git a/src/webui/service/templates/device/home.html b/src/webui/service/templates/device/home.html
index fca799acafd965442bc0ee998ae70f9c76a45e32..b1237eac1fde1a16dea30515dd6fcbe353bcdca5 100644
--- a/src/webui/service/templates/device/home.html
+++ b/src/webui/service/templates/device/home.html
@@ -51,6 +51,7 @@
             <th scope="col">Config Rules</th>
             <th scope="col"></th>
             <th scope="col"></th>
+            <th scope="col"></th>
           </tr>
         </thead>
         <tbody>
@@ -83,6 +84,14 @@
                               </svg>
                         </a>
                     </td>
+                    <td>
+                        <a href="{{ url_for('device.logical', device_uuid=device.device_id.device_uuid.uuid) }}">
+                            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle" viewBox="0 0 16 16">
+                                <path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14m0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16"/>
+                                <path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0"/>
+                              </svg>
+                        </a>
+                    </td>
                 </tr>
                 {% endfor %}
             {% else %}
diff --git a/src/webui/service/templates/device/logical.html b/src/webui/service/templates/device/logical.html
new file mode 100644
index 0000000000000000000000000000000000000000..8a2541989f0ed51c1257aae5fe8d76bfd01ff5c3
--- /dev/null
+++ b/src/webui/service/templates/device/logical.html
@@ -0,0 +1,397 @@
+<!--
+    Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+   
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+   
+         http://www.apache.org/licenses/LICENSE-2.0
+   
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+   -->
+
+{% extends 'base.html' %}
+
+{% block content %}
+<style>
+    ul,
+    #myUL {
+        list-style-type: none;
+    }
+
+    #myUL {
+        margin: 0;
+        padding: 0;
+    }
+
+    .caret {
+        cursor: pointer;
+        -webkit-user-select: none;
+        /* Safari 3.1+ */
+        -moz-user-select: none;
+        /* Firefox 2+ */
+        -ms-user-select: none;
+        /* IE 10+ */
+        user-select: none;
+    }
+
+    .caret::before {
+        content: "\25B6";
+        color: black;
+        display: inline-block;
+        margin-right: 6px;
+    }
+
+    .caret-down::before {
+        -ms-transform: rotate(90deg);
+        /* IE 9 */
+        -webkit-transform: rotate(90deg);
+        /* Safari */
+        transform: rotate(90deg);
+    }
+
+    .nested {
+        display: none;
+    }
+
+    .active {
+        display: block;
+    }
+</style>
+
+<h1>Device {{ device.name }} ({{ device.device_id.device_uuid.uuid }})</h1>
+
+<div class="row mb-3">
+    <div class="col-sm-3">
+        <button type="button" class="btn btn-success" onclick="window.location.href='{{ url_for('device.home') }}'">
+            <i class="bi bi-box-arrow-in-left"></i>
+            Back to device list
+        </button>
+    </div>
+</div>
+<br>
+
+<div class="row mb-3">
+    <div>
+        <ul id="myUL">
+            <li><span class="caret">ACL</span>
+                <ul class="nested">
+                    {% set acl_names = [] %}
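+                    {# Accumulating values across loop iterations: plain "set" bindings do
+                       not escape a Jinja for-block, so the throwaway "set _ = list.append(x)"
+                       idiom below mutates a list created outside the loop instead. #}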
+                    {% for config in device.device_config.config_rules %}
+                        {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/acl/' in config.custom.resource_key %}
+                                {% if 'acl-set' in config.custom.resource_key %}
+                                    {% set acl_name = config.custom.resource_key.split('acl-set[')[1].split('][')[0] %}
+                                {% else %}
+                                    {% set acl_name = config.custom.resource_key.split('ress[')[1].split('][')[0] %}
+                                {% endif %}
+                                {% if acl_name|length == 0 %}
+                                    {% set acl_name = 'Undefined' %}
+                                {% endif %}
+                                {% if acl_name not in acl_names %}
+                                    {% set _ = acl_names.append(acl_name) %}
+                                {% endif %}
+                            {% endif %}
+                        {% endif %}
+                    {% endfor %}
+                    {% for acl_name in acl_names %}
+                        <li><span class="caret">{{ acl_name }}</span>
+                            <ul class="nested">
+                                {% for config in device.device_config.config_rules %}
+                                    {% if config.WhichOneof('config_rule') == 'custom' %}
+                                        {% if '/acl/' in config.custom.resource_key and acl_name in config.custom.resource_key.split('][')[0] %}
+                                            {% if 'acl-entry' in config.custom.resource_key %}
+                                                {% set rule_number = config.custom.resource_key.split('acl-entry[')[1].split(']')[0] %}
+                                                <li><span><b>Rule {{ rule_number }}:</b> {{ config.custom.resource_value }}</span></li>
+                                            {% else %}
+                                                <li><span><b>Interface:</b> {{ config.custom.resource_value }}</span></li>
+                                            {% endif %}
+                                        {% endif %}
+                                    {% endif %}
+                                {% endfor %}
+                            </ul>
+                        </li>
+                    {% endfor %}
+                </ul>
+            </li>
+        </ul>
+
+        <ul id="myUL">
+            <li><span class="caret">Routing Policy</span>
+                <ul class="nested">
+                    {% set pol_names = [] %}
+                    {% for config in device.device_config.config_rules %}
+                        {% if config.WhichOneof('config_rule') == 'custom' %}
+                            {% if '/routing_policy/' in config.custom.resource_key %}
+                                {% if 'policy_definition' in config.custom.resource_key %}
+                                    {% set pol_name = config.custom.resource_key.split('policy_definition[')[1].split(']')[0] %}
+                                {% endif %}
+                                {% if pol_name is not defined or pol_name|length == 0 %}
+                                    {% set pol_name = 'Undefined' %}
+                                {% endif %}
+                                {% if pol_name not in pol_names %}
+                                    {% set _ = pol_names.append(pol_name) %}
+                                {% endif %}
+                            {% endif %}
+                        {% endif %}
+                    {% endfor %}
+                    {% for pol_name in pol_names %}
+                        <li><span class="caret">{{ pol_name }}</span>
+                            <ul class="nested">
+                            {% for config in device.device_config.config_rules %}
+                                {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/routing_policy/' in config.custom.resource_key and pol_name in config.custom.resource_key.split('[')[1].split(']')[0] %}
+                                        {% if 'policy_definition' not in config.custom.resource_key %}
+                                            <li><span>{{ config.custom.resource_value }}</span></li>
+                                        {% endif %}
+                                    {% endif %}
+                                {% endif %}
+                            {% endfor %}
+                            </ul>
+                        </li>
+                    {% endfor %}
+                </ul>
+            </li>
+        </ul>
+
+        <ul id="myUL">
+            <li><span class="caret">VRFs</span>
+                <ul class="nested">
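+                    {# network_instance config rules are grouped into the default VRF and,
+                       by markers in resource_value, into L3VPNs ('L3VRF') and L2VPNs ('L2VSI'). #}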
+                    <li><span class="caret">VRF default</span>
+                        <ul class="nested">
+                            {% for config in device.device_config.config_rules %}
+                                {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/network_instance' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] == 'default' %}
+                                        {% if ']/' in config.custom.resource_key %}
+                                            {% set aux = config.custom.resource_key.split(']/')[1].split('[')[0] %}
+                                            <li><span><b> {{ aux.replace('_', ' ').title() }}:</b> {{ config.custom.resource_value }}</span></li>
+                                        {% else %}
+                                            <li><span><b> Network Instance:</b> {{ config.custom.resource_value }}</span></li>
+                                        {% endif %}
+                                    {% endif %}
+                                {% endif %}
+                            {% endfor %}
+                        </ul>
+                    </li>
+
+                    <li><span class="caret">L3VPN</span>
+                        <ul class="nested">
+                            {% set vpn_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                                {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/network_instance' in config.custom.resource_key %}
+
+                                        {% if 'L3VRF' in config.custom.resource_value %}
+                                            {% set vpn_name = config.custom.resource_key.split('network_instance[')[1].split(']')[0] %}
+                                            {% if vpn_name not in vpn_names %}
+                                                {% set _ = vpn_names.append(vpn_name) %}
+                                            {% endif %}
+                                        {% endif %}
+                                    {% endif %}
+                                {% endif %}
+                            {% endfor %}
+                            {% for vpn_name in vpn_names %}
+                                <li><span class="caret">{{ vpn_name }}</span>
+                                    <ul class="nested">
+                                        {% for config in device.device_config.config_rules %}
+                                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                                                {% if '/network_instance' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in vpn_name %}
+                                                    {% if ']/' in config.custom.resource_key %}
+                                                        {% set aux = config.custom.resource_key.split(']/')[1].split('[')[0] %}
+                                                        <li><span><b> {{ aux.replace('_', ' ').title() }}:</b> {{ config.custom.resource_value }}</span></li>
+                                                    {% else %}
+                                                        <li><span><b> Network Instance:</b> {{ config.custom.resource_value }}</span></li>
+                                                    {% endif %}
+                                                {% endif %}
+                                            {% endif %}
+                                        {% endfor %}
+                                    </ul>
+                                </li>
+                            {% endfor %}
+                        </ul>
+                    </li>
+
+                    <li><span class="caret">L2VPN</span>
+                        <ul class="nested">
+                            {% set vpn_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                                {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/network_instance' in config.custom.resource_key %}
+
+                                        {% if 'L2VSI' in config.custom.resource_value %}
+                                            {% set vpn_name = config.custom.resource_key.split('network_instance[')[1].split(']')[0] %}
+                                            {% if vpn_name not in vpn_names %}
+                                                {% set _ = vpn_names.append(vpn_name) %}
+                                            {% endif %}
+                                        {% endif %}
+                                    {% endif %}
+                                {% endif %}
+                            {% endfor %}
+                            {% for vpn_name in vpn_names %}
+                                <li><span class="caret">{{ vpn_name }}</span>
+                                    <ul class="nested">
+                                        {% for config in device.device_config.config_rules %}
+                                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                                                {% if '/network_instance' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in vpn_name %}
+                                                    {% if ']/' in config.custom.resource_key %}
+                                                        {% set aux = config.custom.resource_key.split(']/')[1].split('[')[0] %}
+                                                        <li><span><b> {{ aux.replace('_', ' ').title() }}:</b> {{ config.custom.resource_value }}</span></li>
+                                                    {% else %}
+                                                        <li><span><b> Network Instance:</b> {{ config.custom.resource_value }}</span></li>
+                                                    {% endif %}
+                                                {% endif %}
+                                            {% endif %}
+                                        {% endfor %}
+                                    </ul>
+                                </li>
+                            {% endfor %}
+                        </ul>
+                    </li>
+                </ul>
+            </li>
+        </ul>
+
+        <ul id="myUL">
+            <li><span class="caret">Interfaces</span>
+                <ul class="nested">
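+                    {# Interfaces are classified by the IANA interface type carried in
+                       resource_value: ethernetCsmacd (logical), softwareLoopback,
+                       l3ipvlan (L3) and l2vlan/mplsTunnel (L2). #}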
+                    <li><span class="caret">Logical Interfaces</span>
+                        <ul class="nested">
+                            {% set interface_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                                {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/interface[' in config.custom.resource_key %}
+                                        {% if 'ethernetCsmacd' in config.custom.resource_value %}
+                                            {% set interface_name = config.custom.resource_key.split('interface[')[1].split(']')[0] %}
+                                            <li><span>{{ interface_name }}:</span> {{ config.custom.resource_value }}</li>
+                                        {% endif %}
+                                    {% endif %}
+                                {% endif %}
+                            {% endfor %}
+                        </ul>
+                    </li>
+
+                    <li><span class="caret">Loopback</span>
+                        <ul class="nested">
+                            {% set interface_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                                {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/interface[' in config.custom.resource_key %}
+                                        {% if 'softwareLoopback' in config.custom.resource_value %}
+                                            {% set interface_name = config.custom.resource_key.split('interface[')[1].split(']')[0] %}
+                                            {% if interface_name not in interface_names %}
+                                                {% set _ = interface_names.append(interface_name) %}
+                                            {% endif %}
+                                        {% endif %}
+                                    {% endif %}
+                                {% endif %}
+                            {% endfor %}
+                            {% for interface_name in interface_names %}
+                                <li><span class="caret">{{ interface_name }}</span>
+                                    <ul class="nested">
+                                        {% for config in device.device_config.config_rules %}
+                                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                                                {% if '/interface' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in interface_name %}
+                                                    {% if 'subinterface' in config.custom.resource_key %}
+                                                        {% set subinterface_name = config.custom.resource_key.split('subinterface[')[1].split(']')[0] %}
+                                                        <li><span><b>Subinterface {{subinterface_name}}: </b>{{ config.custom.resource_value }}</span></li>
+                                                    {% else %}
+                                                        <li><span>{{ config.custom.resource_value }}</span></li>
+                                                    {% endif %}
+                                                {% endif %}
+                                            {% endif %}
+                                        {% endfor %}
+                                    </ul>
+                                </li>
+                            {% endfor %}
+                        </ul>
+                    </li>
+
+                    <li><span class="caret">Interfaces L3</span>
+                        <ul class="nested">
+                            {% set interface_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                                {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/interface[' in config.custom.resource_key %}
+                                        {% if 'l3ipvlan' in config.custom.resource_value %}
+                                            {% set interface_name = config.custom.resource_key.split('interface[')[1].split(']')[0] %}
+                                            {% if interface_name not in interface_names %}
+                                                {% set _ = interface_names.append(interface_name) %}
+                                            {% endif %}
+                                        {% endif %}
+                                    {% endif %}
+                                {% endif %}
+                            {% endfor %}
+                            {% for interface_name in interface_names %}
+                                <li><span class="caret">{{ interface_name }}</span>
+                                    <ul class="nested">
+                                        {% for config in device.device_config.config_rules %}
+                                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                                                {% if '/interface' in config.custom.resource_key and '/subinterface' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in interface_name %}
+                                                    <li><span>{{ config.custom.resource_value }}</span></li>
+                                                {% endif %}
+                                            {% endif %}
+                                        {% endfor %}
+                                    </ul>
+                                </li>
+                            {% endfor %}
+                        </ul>
+                    </li>
+
+                    <li><span class="caret">Interfaces L2</span>
+                        <ul class="nested">
+                            {% set interface_names = [] %}
+                            {% for config in device.device_config.config_rules %}
+                                {% if config.WhichOneof('config_rule') == 'custom' %}
+                                    {% if '/interface[' in config.custom.resource_key %}
+                                        {% if 'l2vlan' in config.custom.resource_value or 'mplsTunnel' in config.custom.resource_value %}
+                                            {% set interface_name = config.custom.resource_key.split('interface[')[1].split(']')[0] %}
+                                            {% if interface_name not in interface_names %}
+                                                {% set _ = interface_names.append(interface_name) %}
+                                            {% endif %}
+                                        {% endif %}
+                                    {% endif %}
+                                {% endif %}
+                            {% endfor %}
+                            {% for interface_name in interface_names %}
+                                <li><span class="caret">{{ interface_name }}</span>
+                                    <ul class="nested">
+                                        {% for config in device.device_config.config_rules %}
+                                            {% if config.WhichOneof('config_rule') == 'custom' %}
+                                                {% if 'subinterface' in config.custom.resource_key %}
+                                                    {% if '/interface' in config.custom.resource_key and '/subinterface' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in interface_name %}
+                                                        <li><span>{{ config.custom.resource_value }}</span></li>
+                                                    {% endif %}
+                                                {% else %}
+                                                    {% if '/interface' in config.custom.resource_key and config.custom.resource_key.split('[')[1].split(']')[0] in interface_name %}
+                                                        <li><span>{{ config.custom.resource_value }}</span></li>
+                                                    {% endif %}
+                                                {% endif %}
+                                            {% endif %}
+                                        {% endfor %}
+                                    </ul>
+                                </li>
+                            {% endfor %}
+                        </ul>
+                    </li>
+                </ul>
+            </li>
+        </ul>
+
+        <script>
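+            // Tree expand/collapse: clicking a .caret toggles visibility of the sibling
+            // .nested list and rotates the caret glyph via the .caret-down class.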
+            var toggler = document.getElementsByClassName("caret");
+            var i;
+            for (i = 0; i < toggler.length; i++) {
+                toggler[i].addEventListener("click", function() {
+                    this.parentElement.querySelector(".nested").classList.toggle("active");
+                    this.classList.toggle("caret-down");
+                });
+            }
+        </script>
+    </div>
+</div>
+
+{% endblock %}