diff --git a/common_requirements.in b/common_requirements.in
index e1bcad78bfc23217633fb28ef28a2d70d070644a..b277265768c9726f17ab046d8aa932167615f523 100644
--- a/common_requirements.in
+++ b/common_requirements.in
@@ -15,6 +15,7 @@
 coverage==6.3
 grpcio==1.47.*
 grpcio-health-checking==1.47.*
+grpcio-reflection==1.47.*
 grpcio-tools==1.47.*
 grpclib==0.4.4
 prettytable==3.5.0
diff --git a/deploy/nats.sh b/deploy/nats.sh
index cb3dd23183d0f3004e8ffb3c82b3cc91414bf704..b6df8066b2eb78263335a8a7831579e0e036b37a 100755
--- a/deploy/nats.sh
+++ b/deploy/nats.sh
@@ -69,8 +69,7 @@ function nats_deploy_single() {
         echo ">>> NATS is present; skipping step."
     else
         echo ">>> Deploy NATS"
-        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine --set config.cluster.enabled=true --set config.cluster.tls.enabled=true
-
+        helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
 
         echo ">>> Waiting NATS statefulset to be created..."
         while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index f396094080c8ec4a33b016b88bc0137a3a32e65c..a1429e443eaa70252ab2dd1f673ce46826b28744 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -153,7 +153,7 @@ kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type=
     --from-literal=CRDB_SSLMODE=require
 printf "\n"
 
-echo ">>> Create Secret with Apache Kakfa..."
+echo ">>> Create Secret with Apache Kafka..."
 KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-service -o 'jsonpath={.spec.ports[0].port}')
 kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
     --from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \
diff --git a/hackfest5/.gitignore b/hackfest5/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0ba4756f172b9aa58e2deebb9f62243d0b91e006
--- /dev/null
+++ b/hackfest5/.gitignore
@@ -0,0 +1,4 @@
+clab-*/
+*.clab.yml.bak
+*.tar
+*.tar.gz
diff --git a/hackfest5/README.md b/hackfest5/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1906a500583945142300256c55f70b06ec210a72
--- /dev/null
+++ b/hackfest5/README.md
@@ -0,0 +1,187 @@
+# Hackfest 5 - Control an Emulated DataPlane through TeraFlowSDN
+
+
+## Prepare your VM
+```bash
+cd ~/tfs-ctrl
+git checkout feat/hackfest5
+git pull
+```
+
+
+
+## ContainerLab Commands
+
+### Download and install ContainerLab
+```bash
+sudo bash -c "$(curl -sL https://get.containerlab.dev)" -- -v 0.59.0
+```
+
+### Check available images in Docker
+```bash
+docker images | grep -E "ceos|multitool"
+```
+
+### Download hackfest5 cEOS image and create Docker image [already done]
+- Note: the image can be downloaded for free from the [Arista](https://www.arista.com/en/login) website.
+```bash
+docker import ~/tfs-ctrl/hackfest5/images/arista/cEOS64-lab-4.31.5M.tar ceos:4.31.5M
+docker import ~/tfs-ctrl/hackfest5/images/arista/cEOS64-lab-4.32.2F.tar ceos:4.32.2F
+```
+
+### Deploy scenario
+```bash
+~/tfs-ctrl/hackfest5/clab-deploy.sh
+```
+
+### Inspect scenario
+```bash
+~/tfs-ctrl/hackfest5/clab-inspect.sh
+```
+
+### Show scenario's topology
+```bash
+~/tfs-ctrl/hackfest5/clab-graph.sh
+```
+
+### Destroy scenario
+```bash
+~/tfs-ctrl/hackfest5/clab-destroy.sh
+```
+
+### Access cEOS CLI
+```bash
+~/tfs-ctrl/hackfest5/clab-cli-r1.sh
+```
+
+### Access DC CLI
+```bash
+~/tfs-ctrl/hackfest5/clab-cli-dc1.sh
+```
+
+### Start pinging remote DC
+- Note: at this point the ping is expected to fail; connectivity is established once the L3VPN is provisioned below.
+```bash
+~/tfs-ctrl/hackfest5/clab-cli-dc1.sh
+    ping 192.168.2.10
+```
+
+
+
+## TeraFlowSDN Commands
+
+### Check status of MicroK8s
+```bash
+microk8s.status --wait-ready
+```
+
+### Start MicroK8s
+```bash
+microk8s.start
+```
+
+### Report MicroK8s status every second
+```bash
+watch -n 1 microk8s.status --wait-ready
+```
+
+### Report status of MicroK8s workloads every second
+```bash
+watch -n 1 kubectl get all --all-namespaces
+```
+
+### Re-Deploy TeraFlowSDN
+```bash
+~/tfs-ctrl/hackfest5/redeploy-tfs.sh
+```
+
+### Show TeraFlowSDN Deployment status
+```bash
+source ~/tfs-ctrl/hackfest5/deploy_specs.sh
+./deploy/show.sh
+```
+
+### Show log of a TeraFlowSDN component
+```bash
+source ~/tfs-ctrl/hackfest5/deploy_specs.sh
+~/tfs-ctrl/scripts/show_logs_device.sh
+```
+
+
+
+## L3VPN Commands
+
+### Create a new IETF L3VPN through TeraFlowSDN NBI
+```bash
+cd ~/tfs-ctrl/hackfest5/data
+curl -X POST \
+    --header "Content-Type: application/json" \
+    --data @ietf-l3vpn-service.json \
+    --user "admin:admin" \
+    http://127.0.0.1/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services
+```
+
+### Get UUID of an IETF L3VPN through TeraFlowSDN NBI
+```bash
+curl --user "admin:admin" \
+    http://127.0.0.1/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service=ietf-l3vpn-svc/
+```
+
+### Delete an IETF L3VPN through TeraFlowSDN NBI
+```bash
+curl -X DELETE --user "admin:admin" \
+    http://127.0.0.1/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service=ietf-l3vpn-svc/
+```
+
+### Start pinging remote DC
+- Note: with the L3VPN provisioned, the ping should now succeed.
+```bash
+~/tfs-ctrl/hackfest5/clab-cli-dc1.sh
+    ping 192.168.2.10
+```
+
+
+
+
+## gNMIc Commands
+
+### Install gNMIc
+```bash
+sudo bash -c "$(curl -sL https://get-gnmic.kmrd.dev)"
+```
+
+### gNMI Capabilities request
+```bash
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure capabilities
+```
+
+### gNMI Get request
+```bash
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path / > r1.json
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path /interfaces/interface > r1-ifaces.json
+```
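+- Note: the saved files are plain JSON; assuming jq is installed, they can be pretty-printed for inspection:
+```bash
+jq '.' r1-ifaces.json | less
+```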
+
+### gNMI Set request
+```bash
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --update-path /system/config/hostname --update-value "my-r1"
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path /system/config/hostname
+```
+
+### Subscribe request
+```bash
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf subscribe --path /interfaces/interface[name=Management0]/state/
+
+# In another terminal, you can generate traffic opening SSH connection
+ssh admin@clab-hackfest5-r1
+```
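+
+### Subscribe with periodic sampling
+- Note: a sketch using gnmic's standard sample-mode flags; instead of waiting for on-change updates, interface counters are polled at a fixed interval (same target and credentials as above).
+```bash
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf subscribe --path /interfaces/interface[name=Management0]/state/counters --mode stream --stream-mode sample --sample-interval 5s
+```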
+
+### Check the applied configurations
+```bash
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/network-instances' > r1-nis.json
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/interfaces' > r1-ifs.json
+```
+
+### Delete configured elements
+```bash
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/network-instances/network-instance[name=b19229e8]'
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]'
+gnmic --address clab-hackfest5-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]'
+```
+```
diff --git a/hackfest5/clab-cli-dc1.sh b/hackfest5/clab-cli-dc1.sh
new file mode 100755
index 0000000000000000000000000000000000000000..44631fa227462996b2b924e01d32f5d148610e4b
--- /dev/null
+++ b/hackfest5/clab-cli-dc1.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-hackfest5-dc1 bash
diff --git a/hackfest5/clab-cli-dc2.sh b/hackfest5/clab-cli-dc2.sh
new file mode 100755
index 0000000000000000000000000000000000000000..56e1520142f3749069582bd519e1f425bacd3e4f
--- /dev/null
+++ b/hackfest5/clab-cli-dc2.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-hackfest5-dc2 bash
diff --git a/hackfest5/clab-cli-r1.sh b/hackfest5/clab-cli-r1.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f921809bfea80dffbc66b4a35fcbb7e786a90cdd
--- /dev/null
+++ b/hackfest5/clab-cli-r1.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-hackfest5-r1 Cli
diff --git a/hackfest5/clab-cli-r2.sh b/hackfest5/clab-cli-r2.sh
new file mode 100755
index 0000000000000000000000000000000000000000..154179a636981ecd6d7831f7498873eca8d94274
--- /dev/null
+++ b/hackfest5/clab-cli-r2.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-hackfest5-r2 Cli
diff --git a/hackfest5/clab-deploy.sh b/hackfest5/clab-deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..84b4d2c221f1f11cc46efd13818b37b5d0e492d3
--- /dev/null
+++ b/hackfest5/clab-deploy.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/hackfest5
+sudo containerlab deploy --topo hackfest5.clab.yml
diff --git a/hackfest5/clab-destroy.sh b/hackfest5/clab-destroy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..dc65a82e7cfd263fc25760b224403e0a31c68188
--- /dev/null
+++ b/hackfest5/clab-destroy.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/hackfest5
+sudo containerlab destroy --topo hackfest5.clab.yml
+sudo rm -rf clab-hackfest5/ .hackfest5.clab.yml.bak
diff --git a/hackfest5/clab-graph.sh b/hackfest5/clab-graph.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f0ad9693296970dbafab9abaa1f41af2c5ee5f4e
--- /dev/null
+++ b/hackfest5/clab-graph.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/hackfest5
+sudo containerlab graph --topo hackfest5.clab.yml
diff --git a/hackfest5/clab-inspect.sh b/hackfest5/clab-inspect.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5e1fc7a623796c1c427abaae334352c38d98addb
--- /dev/null
+++ b/hackfest5/clab-inspect.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/hackfest5
+sudo containerlab inspect --topo hackfest5.clab.yml
diff --git a/hackfest5/data/ietf-l3vpn-service.json b/hackfest5/data/ietf-l3vpn-service.json
new file mode 100644
index 0000000000000000000000000000000000000000..9eb70db5465af56e9877eecf664da0aa7d1313ba
--- /dev/null
+++ b/hackfest5/data/ietf-l3vpn-service.json
@@ -0,0 +1,83 @@
+{
+    "ietf-l3vpn-svc:l3vpn-svc": {
+        "vpn-services": {"vpn-service": [{"vpn-id": "ietf-l3vpn-svc"}]},
+        "sites": {
+            "site": [
+                {
+                    "site-id": "site_DC1",
+                    "management": {"type": "ietf-l3vpn-svc:provider-managed"},
+                    "locations": {"location": [{"location-id": "DC1"}]},
+                    "devices": {"device": [{"device-id": "dc1", "location": "DC1"}]},
+                    "site-network-accesses": {
+                        "site-network-access": [
+                            {
+                                "site-network-access-id": "eth1",
+                                "site-network-access-type": "ietf-l3vpn-svc:multipoint",
+                                "device-reference": "dc1",
+                                "vpn-attachment": {"vpn-id": "ietf-l3vpn-svc", "site-role": "ietf-l3vpn-svc:spoke-role"},
+                                "ip-connection": {
+                                    "ipv4": {
+                                        "address-allocation-type": "ietf-l3vpn-svc:static-address",
+                                        "addresses": {
+                                            "provider-address": "192.168.1.1",
+                                            "customer-address": "192.168.1.10",
+                                            "prefix-length": 24
+                                        }
+                                    }
+                                },
+                                "service": {
+                                    "svc-mtu": 1500,
+                                    "svc-input-bandwidth": 1000000000,
+                                    "svc-output-bandwidth": 1000000000,
+                                    "qos": {"qos-profile": {"classes": {"class": [{
+                                        "class-id": "qos-realtime",
+                                        "direction": "ietf-l3vpn-svc:both",
+                                        "latency": {"latency-boundary": 10},
+                                        "bandwidth": {"guaranteed-bw-percent": 100}
+                                    }]}}}
+                                }
+                            }
+                        ]
+                    }
+                },
+                {
+                    "site-id": "site_DC2",
+                    "management": {"type": "ietf-l3vpn-svc:provider-managed"},
+                    "locations": {"location": [{"location-id": "DC2"}]},
+                    "devices": {"device": [{"device-id": "dc2", "location": "DC2"}]},
+                    "site-network-accesses": {
+                        "site-network-access": [
+                            {
+                                "site-network-access-id": "eth1",
+                                "site-network-access-type": "ietf-l3vpn-svc:multipoint",
+                                "device-reference": "dc2",
+                                "vpn-attachment": {"vpn-id": "ietf-l3vpn-svc", "site-role": "ietf-l3vpn-svc:hub-role"},
+                                "ip-connection": {
+                                    "ipv4": {
+                                        "address-allocation-type": "ietf-l3vpn-svc:static-address",
+                                        "addresses": {
+                                            "provider-address": "192.168.2.1",
+                                            "customer-address": "192.168.2.10",
+                                            "prefix-length": 24
+                                        }
+                                    }
+                                },
+                                "service": {
+                                    "svc-mtu": 1500,
+                                    "svc-input-bandwidth": 1000000000,
+                                    "svc-output-bandwidth": 1000000000,
+                                    "qos": {"qos-profile": {"classes": {"class": [{
+                                        "class-id": "qos-realtime",
+                                        "direction": "ietf-l3vpn-svc:both",
+                                        "latency": {"latency-boundary": 10},
+                                        "bandwidth": {"guaranteed-bw-percent": 100}
+                                    }]}}}
+                                }
+                            }
+                        ]
+                    }
+                }
+            ]
+        }
+    }
+}
diff --git a/hackfest5/data/tfs-service.json b/hackfest5/data/tfs-service.json
new file mode 100644
index 0000000000000000000000000000000000000000..397fc84789111932da047acd22c7bc787888657f
--- /dev/null
+++ b/hackfest5/data/tfs-service.json
@@ -0,0 +1,26 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l3vpn-svc"}
+            },
+            "service_type": "SERVICETYPE_L3NM",
+            "service_status": {"service_status": "SERVICESTATUS_PLANNED"},
+            "service_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}},
+                {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}}
+            ],
+            "service_constraints": [],
+            "service_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {
+                    "resource_key": "/device[dc1]/endpoint[eth1]/settings",
+                    "resource_value": {"address_ip": "192.168.1.10", "address_prefix": 24, "index": 0}
+                }},
+                {"action": "CONFIGACTION_SET", "custom": {
+                    "resource_key": "/device[dc2]/endpoint[eth1]/settings",
+                    "resource_value": {"address_ip": "192.168.2.10", "address_prefix": 24, "index": 0}
+                }}
+            ]}
+        }
+    ]
+}
diff --git a/hackfest5/data/tfs-topology.json b/hackfest5/data/tfs-topology.json
new file mode 100644
index 0000000000000000000000000000000000000000..49df9de4244651d1eb08c24ca3fcbb53d41d2e34
--- /dev/null
+++ b/hackfest5/data/tfs-topology.json
@@ -0,0 +1,100 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "dc1"}}, "device_type": "emu-datacenter",
+            "device_drivers": ["DEVICEDRIVER_UNDEFINED"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "dc2"}}, "device_type": "emu-datacenter",
+            "device_drivers": ["DEVICEDRIVER_UNDEFINED"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "r1"}}, "device_type": "packet-router",
+            "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.101"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin", "use_tls": false
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "r2"}}, "device_type": "packet-router",
+            "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.102"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin", "use_tls": false
+                }}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "r1/Ethernet2==r2/Ethernet1"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}},
+                {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "r2/Ethernet1==r1/Ethernet2"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}},
+                {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}
+            ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "r1/Ethernet10==dc1/eth1"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}},
+                {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "dc1/eth1==r1/Ethernet10"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}
+            ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "r2/Ethernet10==dc2/eth1"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet10"}},
+                {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "dc2/eth1==r2/Ethernet10"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}
+            ]
+        }
+    ]
+}
diff --git a/hackfest5/deploy_specs.sh b/hackfest5/deploy_specs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e9565218a725fbd416ac9fd2e7a9ca432ef20a96
--- /dev/null
+++ b/hackfest5/deploy_specs.sh
@@ -0,0 +1,208 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+#export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service nbi webui"
+
+# Activate Monitoring (old)
+export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Monitoring Framework (new)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation"
+
+# Uncomment to activate QoS Profiles
+#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile"
+
+# Uncomment to activate BGP-LS Speaker
+#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
+
+# Uncomment to activate Optical Controller
+#   To manage optical connections, "service" requires "opticalcontroller" to be deployed
+#   before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
+#   "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it.
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+#    BEFORE="${TFS_COMPONENTS% service*}"
+#    AFTER="${TFS_COMPONENTS#* service}"
+#    export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}"
+#fi
+
+# Uncomment to activate ZTP
+#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
+
+# Uncomment to activate Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
+
+# Uncomment to activate Forecaster
+#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster"
+
+# Uncomment to activate E2E Orchestrator
+#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator"
+
+# Uncomment to activate DLT and Interdomain
+#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt"
+#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then
+#    export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk"
+#    export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem"
+#    export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt"
+#fi
+
+# Uncomment to activate QKD App
+#   To manage QKD Apps, "service" requires "qkd_app" to be deployed
+#   before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
+#   "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it.
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+#    BEFORE="${TFS_COMPONENTS% service*}"
+#    AFTER="${TFS_COMPONENTS#* service}"
+#    export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
+#fi
+
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Set the flag to YES to drop the database if it already exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4222"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8222"
+
+# Set NATS installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/nats.sh for additional details
+export NATS_DEPLOY_MODE="single"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8812"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9009"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9000"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Set the flag to YES to drop the tables if they already exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
+
+
+# ----- Apache Kafka -----------------------------------------------------------
+
+# Set the namespace where Apache Kafka will be deployed.
+#export KFK_NAMESPACE="kafka"
+
+# Set the port Apache Kafka server will be exposed to.
+#export KFK_SERVER_PORT="9092"
+
+# Set the flag to YES to redeploy Apache Kafka.
+#export KFK_REDEPLOY=""
diff --git a/hackfest5/hackfest5.clab.yml b/hackfest5/hackfest5.clab.yml
new file mode 100644
index 0000000000000000000000000000000000000000..acc58e9d01c245108e85ca786427eeac5442203f
--- /dev/null
+++ b/hackfest5/hackfest5.clab.yml
@@ -0,0 +1,67 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TFS - Arista devices + Linux clients
+
+name: hackfest5
+
+mgmt:
+  network: mgmt-net
+  ipv4-subnet: 172.20.20.0/24
+
+topology:
+  kinds:
+    arista_ceos:
+      kind: arista_ceos
+      #image: ceos:4.30.4M
+      #image: ceos:4.31.2F
+      image: ceos:4.31.5M
+      #image: ceos:4.32.0F
+      #image: ceos:4.32.2F
+      #image: ceos:4.32.2.1F
+    linux:
+      kind: linux
+      image: ghcr.io/hellt/network-multitool:latest
+
+  nodes:
+    r1:
+      kind: arista_ceos
+      mgmt-ipv4: 172.20.20.101
+      startup-config: r1-startup.cfg
+
+    r2:
+      kind: arista_ceos
+      mgmt-ipv4: 172.20.20.102
+      startup-config: r2-startup.cfg
+
+    dc1:
+      kind: linux
+      mgmt-ipv4: 172.20.20.201
+      exec:
+        - ip link set address 00:c1:ab:00:01:01 dev eth1
+        - ip address add 192.168.1.10/24 dev eth1
+        - ip route add 192.168.2.0/24 via 192.168.1.1
+
+    dc2:
+      kind: linux
+      mgmt-ipv4: 172.20.20.202
+      exec:
+        - ip link set address 00:c1:ab:00:02:01 dev eth1
+        - ip address add 192.168.2.10/24 dev eth1
+        - ip route add 192.168.1.0/24 via 192.168.2.1
+
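+  # Note: the DCs' eth1 gateways (192.168.1.1 / 192.168.2.1) do not exist at
+  # startup; TeraFlowSDN configures them on r1/r2 when the L3VPN is provisioned.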
+  links:
+    - endpoints: ["r1:eth2", "r2:eth1"]
+    - endpoints: ["r1:eth10", "dc1:eth1"]
+    - endpoints: ["r2:eth10", "dc2:eth1"]
diff --git a/hackfest5/images/arista/.gitignore b/hackfest5/images/arista/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..284b64ce5902e2c74c656fb0c7256bc11a59575d
--- /dev/null
+++ b/hackfest5/images/arista/.gitignore
@@ -0,0 +1,3 @@
+!.gitkeep
+*.tar
+*.tar.gz
diff --git a/hackfest5/images/arista/.gitkeep b/hackfest5/images/arista/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hackfest5/r1-startup.cfg b/hackfest5/r1-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..2d1964f5f6505411674890acafeb05e574f49aa5
--- /dev/null
+++ b/hackfest5/r1-startup.cfg
@@ -0,0 +1,39 @@
+! device: r1 (cEOSLab, EOS-4.31.2F-35442176.4312F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$tUMBMqI5iPca5XcJ$5QU/R83S.zjpHQyeB3H63BGWOgxewjqZ1NsxdaWPo3gLwRXVTrgYvMmwwZlzjYoqrD7yp7e9YD073/.FKLYEY1
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r1
+!
+spanning-tree mode mstp
+!
+system l1
+   unsupported speed action error
+   unsupported error-correction action error
+!
+management api http-commands
+   no shutdown
+!
+management api gnmi
+   transport grpc default
+!
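+! Note: the default gNMI gRPC transport listens on TCP 6030, the port used by
+! gnmic and by the TFS topology descriptor in this hackfest.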
+management api netconf
+   transport ssh default
+!
+interface Ethernet2
+!
+interface Ethernet10
+!
+interface Management0
+   ip address 172.20.20.101/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+!
+end
diff --git a/hackfest5/r2-startup.cfg b/hackfest5/r2-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..7acd56bf64ebc45a437b438f1b13c4aa4182b794
--- /dev/null
+++ b/hackfest5/r2-startup.cfg
@@ -0,0 +1,39 @@
+! device: r2 (cEOSLab, EOS-4.31.2F-35442176.4312F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$Z/om4jI3S5BmwxfB$igaSOaJnh3m36TbSMHKCusA77m07CU8JJxalupXIUFuy7HaGt6k.C1kfSJsPqjn1AhLaL.LvLkt/hcqTFgpjG.
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r2
+!
+spanning-tree mode mstp
+!
+system l1
+   unsupported speed action error
+   unsupported error-correction action error
+!
+management api http-commands
+   no shutdown
+!
+management api gnmi
+   transport grpc default
+!
+management api netconf
+   transport ssh default
+!
+interface Ethernet1
+!
+interface Ethernet10
+!
+interface Management0
+   ip address 172.20.20.102/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+!
+end
diff --git a/hackfest5/redeploy-tfs.sh b/hackfest5/redeploy-tfs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ff55bed3f55e355293e842b2f92c5547715cb247
--- /dev/null
+++ b/hackfest5/redeploy-tfs.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source ~/tfs-ctrl/hackfest5/deploy_specs.sh
+
+echo "Cleaning-up old NATS and Kafka deployments..."
+helm3 uninstall --namespace ${NATS_NAMESPACE} ${NATS_NAMESPACE}
+kubectl delete namespace ${NATS_NAMESPACE} --ignore-not-found
+kubectl delete namespace kafka --ignore-not-found
+printf "\n"
+
+echo "Deployting TeraFlowSDN..."
+
+# Deploy CockroachDB
+./deploy/crdb.sh
+
+# Deploy NATS
+./deploy/nats.sh
+
+# Deploy QuestDB
+./deploy/qdb.sh
+
+# Expose Dashboard
+./deploy/expose_dashboard.sh
+
+# Deploy TeraFlowSDN
+./deploy/tfs.sh
+
+# Show deploy summary
+./deploy/show.sh
+
+printf "\n"
+
+echo "Waiting for Context to be subscribed to NATS..."
+while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do
+    printf "%c" "."
+    sleep 1
+done
+kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server
+printf "\n"
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index 4d3b7780c4cfd82a87f89baee57633503d30b92d..5a321c33e33e83b5eb67b9bacf8b9ce8f1edf304 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -12,6 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: grafana-pvc
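+  # Persists Grafana data (mounted at /var/lib/grafana in the Deployment below) across pod restarts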
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
@@ -99,6 +110,13 @@ spec:
             limits:
               cpu: 500m
               memory: 1024Mi
+          volumeMounts:
+            - mountPath: /var/lib/grafana
+              name: grafana-pv
+      volumes:
+        - name: grafana-pv
+          persistentVolumeClaim:
+            claimName: grafana-pvc
 ---
 apiVersion: v1
 kind: Service
diff --git a/scripts/grpcurl_inspect_context.sh b/scripts/grpcurl_inspect_context.sh
new file mode 100755
index 0000000000000000000000000000000000000000..dda920a7a1899a9d8cdc3e1d1c7c576463c66fed
--- /dev/null
+++ b/scripts/grpcurl_inspect_context.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Ref: https://github.com/fullstorydev/grpcurl
+
+source tfs_runtime_env_vars.sh
+
+GRPC_ENDPOINT="$CONTEXTSERVICE_SERVICE_HOST:$CONTEXTSERVICE_SERVICE_PORT_GRPC"
+GRP_CURL_CMD="docker run fullstorydev/grpcurl --plaintext $GRPC_ENDPOINT"
+
+GRPC_SERVICES=$($GRP_CURL_CMD list)
+echo "gRPC Services found in $GRPC_ENDPOINT:"
+printf "\n"
+
+for GRPC_SERVICE in $GRPC_SERVICES; do
+    echo "gRPC Service: $GRPC_SERVICE"
+    $GRP_CURL_CMD describe $GRPC_SERVICE
+    printf "\n"
+done
+
+echo "Done!"
diff --git a/scripts/grpcurl_inspect_device.sh b/scripts/grpcurl_inspect_device.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0e1202fb6ed3b0306f3dfe03cd60eeb55c7abe83
--- /dev/null
+++ b/scripts/grpcurl_inspect_device.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Ref: https://github.com/fullstorydev/grpcurl
+
+source tfs_runtime_env_vars.sh
+
+GRPC_ENDPOINT="$DEVICESERVICE_SERVICE_HOST:$DEVICESERVICE_SERVICE_PORT_GRPC"
+GRP_CURL_CMD="docker run fullstorydev/grpcurl --plaintext $GRPC_ENDPOINT"
+
+GRPC_SERVICES=$($GRP_CURL_CMD list)
+echo "gRPC Services found in $GRPC_ENDPOINT:"
+printf "\n"
+
+for GRPC_SERVICE in $GRPC_SERVICES; do
+    echo "gRPC Service: $GRPC_SERVICE"
+    $GRP_CURL_CMD describe $GRPC_SERVICE
+    printf "\n"
+done
+
+echo "Done!"
diff --git a/scripts/grpcurl_inspect_pathcomp_frontend.sh b/scripts/grpcurl_inspect_pathcomp_frontend.sh
new file mode 100755
index 0000000000000000000000000000000000000000..686f7ae1e46aed2c60a33cd7fc7265ff1b5a3762
--- /dev/null
+++ b/scripts/grpcurl_inspect_pathcomp_frontend.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Ref: https://github.com/fullstorydev/grpcurl
+
+source tfs_runtime_env_vars.sh
+
+GRPC_ENDPOINT="$PATHCOMPSERVICE_SERVICE_HOST:$PATHCOMPSERVICE_SERVICE_PORT_GRPC"
+GRP_CURL_CMD="docker run fullstorydev/grpcurl --plaintext $GRPC_ENDPOINT"
+
+GRPC_SERVICES=$($GRP_CURL_CMD list)
+echo "gRPC Services found in $GRPC_ENDPOINT:"
+printf "\n"
+
+for GRPC_SERVICE in $GRPC_SERVICES; do
+    echo "gRPC Service: $GRPC_SERVICE"
+    $GRP_CURL_CMD describe $GRPC_SERVICE
+    printf "\n"
+done
+
+echo "Done!"
diff --git a/scripts/grpcurl_inspect_service.sh b/scripts/grpcurl_inspect_service.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f1b674ee5aa8d7f0a1878840cc1f674d61b51ea5
--- /dev/null
+++ b/scripts/grpcurl_inspect_service.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Ref: https://github.com/fullstorydev/grpcurl
+
+source tfs_runtime_env_vars.sh
+
+GRPC_ENDPOINT="$SERVICESERVICE_SERVICE_HOST:$SERVICESERVICE_SERVICE_PORT_GRPC"
+GRP_CURL_CMD="docker run fullstorydev/grpcurl --plaintext $GRPC_ENDPOINT"
+
+GRPC_SERVICES=$($GRP_CURL_CMD list)
+echo "gRPC Services found in $GRPC_ENDPOINT:"
+printf "\n"
+
+for GRPC_SERVICE in $GRPC_SERVICES; do
+    echo "gRPC Service: $GRPC_SERVICE"
+    $GRP_CURL_CMD describe $GRPC_SERVICE
+    printf "\n"
+done
+
+echo "Done!"
diff --git a/scripts/grpcurl_inspect_slice.sh b/scripts/grpcurl_inspect_slice.sh
new file mode 100755
index 0000000000000000000000000000000000000000..170be7bf567c5f7adb0d84db62416ecc0d512f21
--- /dev/null
+++ b/scripts/grpcurl_inspect_slice.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+########################################################################################################################
+# Define your deployment settings here
+########################################################################################################################
+
+# If not already set, set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Ref: https://github.com/fullstorydev/grpcurl
+
+source tfs_runtime_env_vars.sh
+
+GRPC_ENDPOINT="$SLICESERVICE_SERVICE_HOST:$SLICESERVICE_SERVICE_PORT_GRPC"
+GRP_CURL_CMD="docker run fullstorydev/grpcurl --plaintext $GRPC_ENDPOINT"
+
+GRPC_SERVICES=$($GRP_CURL_CMD list)
+echo "gRPC Services found in $GRPC_ENDPOINT:"
+printf "\n"
+
+for GRPC_SERVICE in $GRPC_SERVICES; do
+    echo "gRPC Service: $GRPC_SERVICE"
+    $GRP_CURL_CMD describe $GRPC_SERVICE
+    printf "\n"
+done
+
+echo "Done!"
diff --git a/src/common/tools/client/RetryDecorator.py b/src/common/tools/client/RetryDecorator.py
index fc45d95c8dccbbca32ace5fbba5fb4aefc7a4d79..4750ff73ae4342ce2eb2a31941ff48b46e5be281 100644
--- a/src/common/tools/client/RetryDecorator.py
+++ b/src/common/tools/client/RetryDecorator.py
@@ -51,24 +51,32 @@ LOGGER = logging.getLogger(__name__)
 def delay_linear(initial=0, increment=0, maximum=None):
     def compute(num_try):
         delay = initial + (num_try - 1) * increment
-        if maximum is not None: delay = max(delay, maximum)
+        if maximum is not None:
+            delay = min(delay, maximum)  # cap the delay at 'maximum'
         return delay
     return compute
 
 def delay_exponential(initial=1, increment=1, maximum=None):
     def compute(num_try):
         delay = initial * pow(increment, (num_try - 1))
-        if maximum is not None: delay = max(delay, maximum)
+        if maximum is not None:
+            delay = min(delay, maximum)  # cap the delay at 'maximum'
         return delay
     return compute
 
-def retry(max_retries=0, delay_function=delay_linear(initial=0, increment=0),
-          prepare_method_name=None, prepare_method_args=[], prepare_method_kwargs={}):
+# pylint: disable=dangerous-default-value
+def retry(
+    max_retries=0, delay_function=delay_linear(initial=0, increment=0),
+    prepare_method_name=None, prepare_method_args=list(), prepare_method_kwargs=dict()
+):
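+    # Retries the wrapped method up to 'max_retries' times, sleeping per
+    # 'delay_function' between attempts; if given, the method named by
+    # 'prepare_method_name' (e.g. a reconnect) is invoked before each retry.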
     def _reconnect(func):
         def wrapper(self, *args, **kwargs):
             if prepare_method_name is not None:
                 prepare_method = getattr(self, prepare_method_name, None)
-                if prepare_method is None: raise Exception('Prepare Method ({}) not found'.format(prepare_method_name))
+                if prepare_method is None:
+                    MSG = 'Prepare Method ({:s}) not found'
+                    # pylint: disable=broad-exception-raised
+                    raise Exception(MSG.format(prepare_method_name))
             num_try, given_up = 0, False
             while not given_up:
                 try:
@@ -78,14 +86,29 @@ def retry(max_retries=0, delay_function=delay_linear(initial=0, increment=0),
 
                     num_try += 1
                     given_up = num_try > max_retries
-                    if given_up: raise Exception('Giving up... {:d} tries failed'.format(max_retries)) from e
+                    if given_up:
+                        MSG = '[{:s}:{:s}] Giving up... {:d} tries failed'
+                        msg = MSG.format(func.__module__, func.__name__, max_retries)
+                        # pylint: disable=broad-exception-raised
+                        raise Exception(msg) from e
                     if delay_function is not None:
                         delay = delay_function(num_try)
                         time.sleep(delay)
-                        LOGGER.info('Retry {:d}/{:d} after {:f} seconds...'.format(num_try, max_retries, delay))
+                        MSG = '[{:s}:{:s}] Retry {:d}/{:d} after {:f} seconds...'
+                        LOGGER.info(MSG.format(
+                            func.__module__, func.__name__, num_try, max_retries, delay
+                        ))
                     else:
-                        LOGGER.info('Retry {:d}/{:d} immediate...'.format(num_try, max_retries))
+                        MSG = '[{:s}:{:s}] Retry {:d}/{:d} immediate...'
+                        LOGGER.info(MSG.format(
+                            func.__module__, func.__name__, num_try, max_retries
+                        ))
 
-                    if prepare_method_name is not None: prepare_method(*prepare_method_args, **prepare_method_kwargs)
+                    if prepare_method_name is not None:
+                        MSG = '[{:s}:{:s}] Running prepare method...'
+                        LOGGER.debug(MSG.format(
+                            prepare_method.__module__, prepare_method.__name__
+                        ))
+                        prepare_method(*prepare_method_args, **prepare_method_kwargs)
         return wrapper
     return _reconnect
diff --git a/src/common/tools/service/GenericGrpcService.py b/src/common/tools/service/GenericGrpcService.py
index f29582fff87f4ca89ee44c78adbec33f321a9a39..453309127ccf49272d004740c1e3be52cba26779 100644
--- a/src/common/tools/service/GenericGrpcService.py
+++ b/src/common/tools/service/GenericGrpcService.py
@@ -12,18 +12,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Optional, Union
 import grpc, logging
 from concurrent import futures
+from typing import Any, List, Optional, Union
 from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
 from grpc_health.v1.health_pb2 import HealthCheckResponse
 from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
+from grpc_reflection.v1alpha import reflection
 from common.Settings import get_grpc_bind_address, get_grpc_grace_period, get_grpc_max_workers
 
 class GenericGrpcService:
     def __init__(
-        self, bind_port : Union[str, int], bind_address : Optional[str] = None, max_workers : Optional[int] = None,
-        grace_period : Optional[int] = None, enable_health_servicer : bool = True, cls_name : str = __name__
+        self, bind_port : Union[str, int], bind_address : Optional[str] = None,
+        max_workers : Optional[int] = None, grace_period : Optional[int] = None,
+        enable_health_servicer : bool = True, enable_reflection : bool = True,
+        cls_name : str = __name__
     ) -> None:
         self.logger = logging.getLogger(cls_name)
         self.bind_port = bind_port
@@ -31,6 +34,8 @@ class GenericGrpcService:
         self.max_workers = get_grpc_max_workers() if max_workers is None else max_workers
         self.grace_period = get_grpc_grace_period() if grace_period is None else grace_period
         self.enable_health_servicer = enable_health_servicer
+        self.enable_reflection = enable_reflection
+        self.reflection_service_names : List[str] = [reflection.SERVICE_NAME]
         self.endpoint = None
         self.health_servicer = None
         self.pool = None
@@ -39,6 +44,11 @@ class GenericGrpcService:
     def install_servicers(self):
         pass
 
+    def add_reflection_service_name(self, service_descriptor : Any, service_name : str):
+        self.reflection_service_names.append(
+            service_descriptor.services_by_name[service_name].full_name
+        )
+
     def start(self):
         self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port))
         self.logger.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
@@ -54,6 +64,9 @@ class GenericGrpcService:
                 experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
             add_HealthServicer_to_server(self.health_servicer, self.server)
 
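+        # Expose the collected service names through gRPC server reflection so that
+        # clients (e.g., grpcurl) can discover the hosted services at runtime.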
+        if self.enable_reflection:
+            reflection.enable_server_reflection(self.reflection_service_names, self.server)
+
         self.bind_port = self.server.add_insecure_port(self.endpoint)
         self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port))
         self.logger.info('Listening on {:s}...'.format(str(self.endpoint)))
diff --git a/src/common/tools/service/GenericGrpcServiceAsync.py b/src/common/tools/service/GenericGrpcServiceAsync.py
index 1d652deb79e1389e2403d1d13debcba7dbc43ecc..551a3d568612f59c2bc26f692ab8d1d27dc4f4b3 100644
--- a/src/common/tools/service/GenericGrpcServiceAsync.py
+++ b/src/common/tools/service/GenericGrpcServiceAsync.py
@@ -12,19 +12,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Optional, Union
-import grpc
-import logging
+import grpc, logging
 from concurrent import futures
+from typing import Any, List, Optional, Union
 from grpc_health.v1.health import HealthServicer, OVERALL_HEALTH
 from grpc_health.v1.health_pb2 import HealthCheckResponse
 from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
+from grpc_reflection.v1alpha import reflection
 from common.Settings import get_grpc_bind_address, get_grpc_grace_period, get_grpc_max_workers
 
 class GenericGrpcServiceAsync:
     def __init__(
-        self, bind_port: Union[str, int], bind_address: Optional[str] = None, max_workers: Optional[int] = None,
-        grace_period: Optional[int] = None, enable_health_servicer: bool = True, cls_name: str = __name__
+        self, bind_port : Union[str, int], bind_address : Optional[str] = None,
+        max_workers : Optional[int] = None, grace_period : Optional[int] = None,
+        enable_health_servicer : bool = True, enable_reflection : bool = True,
+        cls_name : str = __name__
     ) -> None:
         self.logger = logging.getLogger(cls_name)
         self.bind_port = bind_port
@@ -32,6 +34,8 @@ class GenericGrpcServiceAsync:
         self.max_workers = get_grpc_max_workers() if max_workers is None else max_workers
         self.grace_period = get_grpc_grace_period() if grace_period is None else grace_period
         self.enable_health_servicer = enable_health_servicer
+        self.enable_reflection = enable_reflection
+        self.reflection_service_names : List[str] = [reflection.SERVICE_NAME]
         self.endpoint = None
         self.health_servicer = None
         self.pool = None
@@ -40,7 +44,12 @@ class GenericGrpcServiceAsync:
     async def install_servicers(self):
         pass
 
-    async def start(self):
+    def add_reflection_service_name(self, service_descriptor : Any, service_name : str):
+        self.reflection_service_names.append(
+            service_descriptor.services_by_name[service_name].full_name
+        )
+
+    def start(self):
         self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port))
         self.logger.info('Starting Service (tentative endpoint: {:s}, max_workers: {:s})...'.format(
             str(self.endpoint), str(self.max_workers)))
@@ -55,6 +64,9 @@ class GenericGrpcServiceAsync:
                 experimental_non_blocking=True, experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
             add_HealthServicer_to_server(self.health_servicer, self.server)
 
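+        # As in the sync variant, expose the collected service names via gRPC server reflection.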
+        if self.enable_reflection:
+            reflection.enable_server_reflection(self.reflection_service_names, self.server)
+
         self.bind_port = self.server.add_insecure_port(self.endpoint)
         self.endpoint = '{:s}:{:s}'.format(str(self.bind_address), str(self.bind_port))
         self.logger.info('Listening on {:s}...'.format(str(self.endpoint)))
diff --git a/src/context/service/ContextService.py b/src/context/service/ContextService.py
index a385f4481ec022b475649b749f5206512fe216b2..d633dea15a1e00316c9e8eac9d31ca69829f180f 100644
--- a/src/context/service/ContextService.py
+++ b/src/context/service/ContextService.py
@@ -16,7 +16,9 @@ import logging, sqlalchemy
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
 from common.message_broker.MessageBroker import MessageBroker
+from common.proto.context_pb2 import DESCRIPTOR as CONTEXT_DESCRIPTOR
 from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+from common.proto.context_policy_pb2 import DESCRIPTOR as CONTEXT_POLICY_DESCRIPTOR
 from common.proto.context_policy_pb2_grpc import add_ContextPolicyServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from .ContextServiceServicerImpl import ContextServiceServicerImpl
@@ -36,3 +38,6 @@ class ContextService(GenericGrpcService):
     def install_servicers(self):
         add_ContextServiceServicer_to_server(self.context_servicer, self.server)
         add_ContextPolicyServiceServicer_to_server(self.context_servicer, self.server)
+
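+        # Register the proto service names for gRPC reflection; the names must match
+        # the service definitions in context.proto and context_policy.proto.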
+        self.add_reflection_service_name(CONTEXT_DESCRIPTOR, 'ContextService')
+        self.add_reflection_service_name(CONTEXT_POLICY_DESCRIPTOR, 'ContextPolicyService')
diff --git a/src/device/requirements.in b/src/device/requirements.in
index 30c98265040f435968d842dad40ca8b1272ab8de..e5ac64a6452abf7a17d31e93c36f16ca66122bc0 100644
--- a/src/device/requirements.in
+++ b/src/device/requirements.in
@@ -30,7 +30,7 @@ ncclient==0.6.15
 numpy<2.0.0
 p4runtime==1.3.0
 pandas==1.5.*
-paramiko==2.9.2
+paramiko==2.11.*
 pyang==2.6.*
 git+https://github.com/robshakir/pyangbind.git
 python-json-logger==2.0.2
@@ -39,7 +39,7 @@ python-json-logger==2.0.2
 requests==2.27.1
 requests-mock==1.9.3
 tabulate
-websockets==10.4
+websockets==12.0
 werkzeug==2.3.7
 xmltodict==0.12.0
 yattag
diff --git a/src/device/service/DeviceService.py b/src/device/service/DeviceService.py
index a94259471e76e40ddc5c6a1f2c109e8ada5075ae..a5a48e1bf80bce67fccd32316fa92935428d5b65 100644
--- a/src/device/service/DeviceService.py
+++ b/src/device/service/DeviceService.py
@@ -12,10 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
+from common.proto.device_pb2 import DESCRIPTOR as DEVICE_DESCRIPTOR
 from common.proto.device_pb2_grpc import add_DeviceServiceServicer_to_server
+from common.proto.optical_device_pb2 import DESCRIPTOR as OPTICAL_DEVICE_DESCRIPTOR
 from common.proto.optical_device_pb2_grpc import add_OpenConfigServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from device.Config import LOAD_ALL_DEVICE_DRIVERS
@@ -41,8 +42,11 @@ class DeviceService(GenericGrpcService):
     def install_servicers(self):
         self.monitoring_loops.start()
         add_DeviceServiceServicer_to_server(self.device_servicer, self.server)
+        self.add_reflection_service_name(DEVICE_DESCRIPTOR, 'DeviceService')
+
         if LOAD_ALL_DEVICE_DRIVERS:
             add_OpenConfigServiceServicer_to_server(self.openconfig_device_servicer,self.server)
+            self.add_reflection_service_name(OPTICAL_DEVICE_DESCRIPTOR, 'OpenConfigService')
 
     def stop(self):
         super().stop()
diff --git a/src/device/service/driver_api/_Driver.py b/src/device/service/driver_api/_Driver.py
index 2580c3e785450f7be1972679872c3d16db39c714..e838e5b3d3cfca353fd4994fd60ad387b5f6766d 100644
--- a/src/device/service/driver_api/_Driver.py
+++ b/src/device/service/driver_api/_Driver.py
@@ -25,12 +25,15 @@ RESOURCE_ROUTING_POLICIES = '__routing_policies__'
 RESOURCE_SERVICES = '__services__'
 RESOURCE_ACL = '__acl__'
 RESOURCE_INVENTORY = '__inventory__'
+RESOURCE_RULES = '__rules__'
 
 
 class _Driver:
     def __init__(self, name : str, address: str, port: int, **settings) -> None:
         """ Initialize Driver.
             Parameters:
+                name : str
+                    Device driver name
                 address : str
                     The address of the device
                 port : int
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py b/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py
index fda3ab71be3b2c82020e93f92cf159da38e5eef4..a52c84691c00b0c09336e2400dbeb94a9ca310ed 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py
@@ -69,7 +69,6 @@ class InterfaceHandler(_Handler):
             yang_ipv4_addr : libyang.DContainer = yang_ipv4_addrs.create_path(yang_ipv4_addr_path)
             yang_ipv4_addr.create_path('config/ip',            address_ip)
             yang_ipv4_addr.create_path('config/prefix-length', address_prefix)
-            if mtu is not None: yang_ipv4_addr.create_path('config/mtu', mtu)
 
         str_path = '/interfaces/interface[name={:s}]'.format(if_name)
         str_data = yang_if.print_mem('json')
diff --git a/src/device/service/drivers/p4/p4_common.py b/src/device/service/drivers/p4/p4_common.py
index ec851493777243829737136722198173580fbadd..b55296a65922de93370a66301254cacf9ca7220a 100644
--- a/src/device/service/drivers/p4/p4_common.py
+++ b/src/device/service/drivers/p4/p4_common.py
@@ -27,10 +27,12 @@ import math
 import re
 import socket
 import ipaddress
+from typing import Any, Dict, List, Optional, Tuple
 from ctypes import c_uint16, sizeof
 import macaddress
 
-from common.type_checkers.Checkers import chk_type
+from common.type_checkers.Checkers import \
+    chk_attribute, chk_string, chk_type, chk_issubclass
 try:
     from .p4_exception import UserBadValueError
 except ImportError:
@@ -38,6 +40,7 @@ except ImportError:
 
 P4_ATTR_DEV_ID = "id"
 P4_ATTR_DEV_NAME = "name"
+P4_ATTR_DEV_ENDPOINTS = "endpoints"
 P4_ATTR_DEV_VENDOR = "vendor"
 P4_ATTR_DEV_HW_VER = "hw_ver"
 P4_ATTR_DEV_SW_VER = "sw_ver"
@@ -50,6 +53,7 @@ P4_VAL_DEF_HW_VER = "BMv2 simple_switch"
 P4_VAL_DEF_SW_VER = "Stratum"
 P4_VAL_DEF_TIMEOUT = 60
 
+RESOURCE_ENDPOINTS_ROOT_PATH = "/endpoints"
 
 # Logger instance
 LOGGER = logging.getLogger(__name__)
@@ -422,6 +426,28 @@ def parse_action_parameters_from_json(resource):
     return action_params
 
 
+def parse_replicas_from_json(resource):
+    """
+    Parse the session replicas within a JSON-based object.
+
+    :param resource: JSON-based object
+    :return: map of replicas
+    """
+    if not resource or ("replicas" not in resource):
+        LOGGER.warning(
+            "JSON entry is missing the 'replicas' list of attributes")
+        return None
+    chk_type("replicas", resource["replicas"], list)
+
+    replicas = {}
+    for rep in resource["replicas"]:
+        chk_type("egress-port", rep["egress-port"], int)
+        chk_type("instance", rep["instance"], int)
+        replicas[rep["egress-port"]] = rep["instance"]
+
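+    # Example (illustrative): {"replicas": [{"egress-port": 1, "instance": 1}]}
+    # parses into {1: 1} (egress port -> replica instance).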
+    return replicas
+
+
 def parse_integer_list_from_json(resource, resource_list, resource_item):
     """
     Parse the list of integers within a JSON-based object.
@@ -443,3 +469,77 @@ def parse_integer_list_from_json(resource, resource_list, resource_item):
         integers_list.append(item[resource_item])
 
     return integers_list
+
+def process_optional_string_field(
+    #TODO: Consider adding this to common methods, as it is borrowed from the Emulated driver
+    endpoint_data : Dict[str, Any], field_name : str, endpoint_resource_value : Dict[str, Any]
+) -> None:
+    field_value = chk_attribute(field_name, endpoint_data, 'endpoint_data', default=None)
+    if field_value is None: return
+    chk_string('endpoint_data.{:s}'.format(field_name), field_value)
+    if len(field_value) > 0: endpoint_resource_value[field_name] = field_value
+
+def compose_resource_endpoints(endpoints_list : List[Tuple[str, Any]]):
+    #TODO: Consider adding this to common methods; currently borrowed from the Emulated driver
+    endpoint_resources = []
+    for i, endpoint in enumerate(endpoints_list):
+        LOGGER.debug("P4 endpoint {}: {}".format(i, endpoint))
+        endpoint_resource = compose_resource_endpoint(endpoint)
+        if endpoint_resource is None: continue
+        endpoint_resources.append(endpoint_resource)
+    return endpoint_resources
+
+def compose_resource_endpoint(endpoint_data : Dict[str, Any]) -> Optional[Tuple[str, Dict]]:
+    #TODO: Consider adding this to common methods; currently borrowed from the Emulated driver
+    try:
+        endpoint_uuid = chk_attribute('uuid', endpoint_data, 'endpoint_data')
+        chk_string('endpoint_data.uuid', endpoint_uuid, min_length=1)
+        endpoint_resource_path = RESOURCE_ENDPOINTS_ROOT_PATH
+        endpoint_resource_key = '{:s}/endpoint[{:s}]'.format(endpoint_resource_path, endpoint_uuid)
+        endpoint_resource_value = {'uuid': endpoint_uuid}
+
+        # Check endpoint's optional string fields
+        process_optional_string_field(endpoint_data, 'name', endpoint_resource_value)
+        process_optional_string_field(endpoint_data, 'type', endpoint_resource_value)
+        process_optional_string_field(endpoint_data, 'context_uuid', endpoint_resource_value)
+        process_optional_string_field(endpoint_data, 'topology_uuid', endpoint_resource_value)
+
+        return endpoint_resource_key, endpoint_resource_value
+    except: # pylint: disable=bare-except
+        LOGGER.error('Problem composing endpoint({:s})'.format(str(endpoint_data)))
+        return None
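+# Example (illustrative): an endpoint {'uuid': 'port-1', 'type': 'copper'} composes
+# into ('/endpoints/endpoint[port-1]', {'uuid': 'port-1', 'type': 'copper'}).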
+
+def compose_resource_rules(rules_list : List[Tuple[str, Any]]):
+    rule_resources = []
+    for i, rule in enumerate(rules_list):
+        rule_resource = compose_resource_rule(rule_data=rule, rule_cnt=i)
+        if rule_resource is None: continue
+        rule_resources.append(rule_resource)
+    return rule_resources
+
+def compose_resource_rule(rule_data : Dict[str, Any], rule_cnt : int) -> Optional[Tuple[str, Dict]]:
+    try:
+        LOGGER.info("Rule: {}".format(rule_data))
+
+        rule_resource_key = chk_attribute('resource_key', rule_data, 'rule_data')
+        chk_string('rule_data.resource_key', rule_resource_key, min_length=1)
+
+        rule_resource_value = chk_attribute('resource_value', rule_data, 'rule_data')
+        chk_issubclass('rule_data.resource_value', rule_resource_value, dict)
+
+        rule_key_unique = ""
+
+        if "table" == rule_resource_key:
+            table_name = parse_resource_string_from_json(rule_resource_value, "table-name")
+            assert table_name, "Invalid table name in rule"
+            rule_key_unique = '/{0}s/{0}/{1}[{2}]'.format(rule_resource_key, table_name, rule_cnt)
+        else:
+            msg = f"Parsed an invalid key {rule_resource_key}"
+            LOGGER.error(msg)
+            raise Exception(msg)
+
+        assert rule_key_unique, "Invalid unique resource key"
+        return rule_key_unique, rule_resource_value
+    except: # pylint: disable=bare-except
+        LOGGER.error('Problem composing rule({:s})'.format(str(rule_data)))
+        return None
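+# Example (illustrative): a rule {'resource_key': 'table', 'resource_value':
+# {'table-name': 'IngressPipe.l3_table', ...}} with rule_cnt=0 yields the unique
+# key '/tables/table/IngressPipe.l3_table[0]'.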
diff --git a/src/device/service/drivers/p4/p4_context.py b/src/device/service/drivers/p4/p4_context.py
index ca8f0c19ef6cecdc361664323bbe2cd8beed64e3..ce8e308e8b2f55aa37c7cf06b6a56fdf7dc3bd7f 100644
--- a/src/device/service/drivers/p4/p4_context.py
+++ b/src/device/service/drivers/p4/p4_context.py
@@ -34,6 +34,7 @@ class P4Type(enum.Enum):
     meter = 6
     direct_meter = 7
     controller_packet_metadata = 8
+    digest = 9
 
 
 P4Type.table.p4info_name = "tables"
@@ -44,6 +45,7 @@ P4Type.direct_counter.p4info_name = "direct_counters"
 P4Type.meter.p4info_name = "meters"
 P4Type.direct_meter.p4info_name = "direct_meters"
 P4Type.controller_packet_metadata.p4info_name = "controller_packet_metadata"
+P4Type.digest.p4info_name = "digests"
 
 for object_type in P4Type:
     object_type.pretty_name = object_type.name.replace('_', ' ')
@@ -58,11 +60,12 @@ class P4RuntimeEntity(enum.Enum):
     table_entry = 1
     action_profile_member = 2
     action_profile_group = 3
-    meter_entry = 4
-    direct_meter_entry = 5
-    counter_entry = 6
-    direct_counter_entry = 7
+    counter_entry = 4
+    direct_counter_entry = 5
+    meter_entry = 6
+    direct_meter_entry = 7
     packet_replication_engine_entry = 8
+    digest_entry = 9
 
 
 class Context:
diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py
index d31fa46738fad9f9404c8ba21dfbc49b08fde074..c89a42baddaf45737ebfcf26a665f0a6beb8544f 100644
--- a/src/device/service/drivers/p4/p4_driver.py
+++ b/src/device/service/drivers/p4/p4_driver.py
@@ -23,15 +23,19 @@ import threading
 from typing import Any, Iterator, List, Optional, Tuple, Union
 from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
 from common.type_checkers.Checkers import chk_type, chk_length, chk_string
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_RULES
 from .p4_common import matches_ipv4, matches_ipv6, valid_port,\
-    P4_ATTR_DEV_ID, P4_ATTR_DEV_NAME, P4_ATTR_DEV_VENDOR,\
-    P4_ATTR_DEV_HW_VER, P4_ATTR_DEV_SW_VER,\
+    compose_resource_endpoints, parse_resource_string_from_json,\
+    P4_ATTR_DEV_ID, P4_ATTR_DEV_NAME, P4_ATTR_DEV_ENDPOINTS,\
+    P4_ATTR_DEV_VENDOR, P4_ATTR_DEV_HW_VER, P4_ATTR_DEV_SW_VER,\
     P4_ATTR_DEV_P4BIN, P4_ATTR_DEV_P4INFO, P4_ATTR_DEV_TIMEOUT,\
     P4_VAL_DEF_VENDOR, P4_VAL_DEF_HW_VER, P4_VAL_DEF_SW_VER,\
     P4_VAL_DEF_TIMEOUT
-from .p4_manager import P4Manager, KEY_TABLE, KEY_ACTION, \
-    KEY_ACTION_PROFILE, KEY_COUNTER, KEY_DIR_COUNTER, KEY_METER, KEY_DIR_METER,\
-    KEY_CTL_PKT_METADATA
+from .p4_manager import P4Manager, \
+    KEY_TABLE, KEY_ACTION, KEY_ACTION_PROFILE, \
+    KEY_COUNTER, KEY_DIR_COUNTER, KEY_METER, KEY_DIR_METER,\
+    KEY_CTL_PKT_METADATA, KEY_DIGEST, KEY_CLONE_SESSION,\
+    KEY_ENDPOINT
 from .p4_client import WriteOperation
 
 try:
@@ -59,6 +63,8 @@ class P4Driver(_Driver):
             P4 device datapath ID (Mandatory)
         name : str
             P4 device name (Optional)
+        endpoints : list
+            List of P4 device endpoints, i.e., ports (Optional)
         vendor : str
             P4 device vendor (Optional)
         hw_ver : str
@@ -70,17 +76,22 @@ class P4Driver(_Driver):
         p4info : str
             Path to P4 info file (Optional, but must be combined with p4bin)
         timeout : int
-            Device timeout in seconds (Optional)
+            P4 device timeout in seconds (Optional)
+        rules : list
+            List of rules to configure the P4 device's pipeline
     """
 
     def __init__(self, address: str, port: int, **settings) -> None:
-        super().__init__(settings.pop('name', DRIVER_NAME), address, port, **settings)
+        # Keep 'settings' intact (it may carry the device 'name'); pass it as a single
+        # kwarg to avoid colliding with _Driver's positional 'name' parameter.
+        super().__init__(name=DRIVER_NAME, address=address, port=port, settings=settings)
         self.__manager = None
         self.__address = address
         self.__port = int(port)
-        self.__endpoint = None
+        self.__grpc_endpoint = None
         self.__settings = settings
         self.__id = None
+        self.__name = None
+        self.__endpoints = []
+        self.__rules = {}
         self.__vendor = P4_VAL_DEF_VENDOR
         self.__hw_version = P4_VAL_DEF_HW_VER
         self.__sw_version = P4_VAL_DEF_SW_VER
@@ -97,7 +108,7 @@ class P4Driver(_Driver):
                     self.__address, self.__port)
 
         for key, value in settings.items():
-            LOGGER.info("\t%8s = %s", key, value)
+            LOGGER.info("\t%9s = %s", key, value)
 
     def Connect(self) -> bool:
         """
@@ -105,14 +116,14 @@ class P4Driver(_Driver):
 
         :return: boolean connection status.
         """
-        LOGGER.info("Connecting to P4 device %s ...", self.__endpoint)
+        LOGGER.info("Connecting to P4 device %s ...", self.__grpc_endpoint)
 
         with self.__lock:
             # Skip if already connected
             if self.__started.is_set():
                 return True
 
-            # Dynamically devise an election ID
+            # TODO: Dynamically devise an election ID
             election_id = (1, 0)
 
             # Spawn a P4 manager for this device
@@ -140,7 +151,7 @@ class P4Driver(_Driver):
 
         :return: boolean disconnection status.
         """
-        LOGGER.info("Disconnecting from P4 device %s ...", self.__endpoint)
+        LOGGER.info("Disconnecting from P4 device %s ...", self.__grpc_endpoint)
 
         # If not started, assume it is already disconnected
         if not self.__started.is_set():
@@ -167,13 +178,15 @@ class P4Driver(_Driver):
 
         :return: list of initial configuration items.
         """
-        initial_conf = []
+
+        resource_keys = [RESOURCE_ENDPOINTS] if self.__endpoints else []
 
         with self.__lock:
-            if not initial_conf:
-                LOGGER.warning("No initial configuration for P4 device %s ...",
-                               self.__endpoint)
-            return []
+            if not resource_keys:
+                LOGGER.warning("No initial configuration for P4 device {} ...".format(self.__grpc_endpoint))
+                return []
+            LOGGER.info("Initial configuration for P4 device {}:".format(self.__grpc_endpoint))
+            return self.GetConfig(resource_keys)
 
     @metered_subclass_method(METRICS_POOL)
     def GetConfig(self, resource_keys: List[str] = [])\
@@ -186,7 +199,7 @@ class P4Driver(_Driver):
         None/Exception.
         """
         LOGGER.info(
-            "Getting configuration from P4 device %s ...", self.__endpoint)
+            "Getting configuration from P4 device %s ...", self.__grpc_endpoint)
 
         # No resource keys means fetch all configuration
         if len(resource_keys) == 0:
@@ -195,7 +208,7 @@ class P4Driver(_Driver):
                 "implies getting all resource keys!")
             resource_keys = [
                 obj_name for obj_name, _ in self.__manager.p4_objects.items()
-            ]
+            ] + [RESOURCE_ENDPOINTS, RESOURCE_RULES]
 
         # Verify the input type
         chk_type("resources", resource_keys, list)
@@ -214,7 +227,7 @@ class P4Driver(_Driver):
         changes requested.
         """
         LOGGER.info(
-            "Setting configuration to P4 device %s ...", self.__endpoint)
+            "Setting configuration to P4 device %s ...", self.__grpc_endpoint)
 
         if not resources or len(resources) == 0:
             LOGGER.warning(
@@ -238,7 +251,7 @@ class P4Driver(_Driver):
         deletions requested.
         """
         LOGGER.info(
-            "Deleting configuration from P4 device %s ...", self.__endpoint)
+            "Deleting configuration from P4 device %s ...", self.__grpc_endpoint)
 
         if not resources or len(resources) == 0:
             LOGGER.warning(
@@ -308,6 +321,14 @@ class P4Driver(_Driver):
         """
         return self.__manager
 
+    def is_started(self):
+        """
+        Check if an instance of the P4 manager is started.
+
+        :return: boolean P4 manager instance status
+        """
+        return self.__started.is_set()
+
     def __parse_and_validate_settings(self):
         """
         Verify that the driver inputs comply to what is expected.
@@ -319,7 +340,7 @@ class P4Driver(_Driver):
             f"{self.__address} not a valid IPv4 or IPv6 address"
         assert valid_port(self.__port), \
             f"{self.__port} not a valid transport port"
-        self.__endpoint = f"{self.__address}:{self.__port}"
+        self.__grpc_endpoint = f"{self.__address}:{self.__port}"
 
         # Device ID
         try:
@@ -337,6 +358,16 @@ class P4Driver(_Driver):
                 "No device name is provided. Setting default name: %s",
                 self.__name)
 
+        # Device endpoints
+        if P4_ATTR_DEV_ENDPOINTS in self.__settings:
+            endpoints = self.__settings.get(P4_ATTR_DEV_ENDPOINTS, [])
+            endpoint_resources = compose_resource_endpoints(endpoints)
+            if endpoint_resources:
+                LOGGER.info("Setting endpoints: {}".format(endpoint_resources))
+                self.SetConfig(endpoint_resources)
+        else:
+            LOGGER.warning("No device endpoints are provided.")
+
         # Device vendor
         if P4_ATTR_DEV_VENDOR in self.__settings:
             self.__vendor = self.__settings.get(P4_ATTR_DEV_VENDOR)
@@ -365,7 +396,7 @@ class P4Driver(_Driver):
         if P4_ATTR_DEV_P4BIN in self.__settings:
             self.__p4bin_path = self.__settings.get(P4_ATTR_DEV_P4BIN)
             assert os.path.exists(self.__p4bin_path),\
-                "Invalid path to p4bin file"
+                "Invalid path to p4bin file: {}".format(self.__p4bin_path)
             assert P4_ATTR_DEV_P4INFO in self.__settings,\
                 "p4info and p4bin settings must be provided together"
 
@@ -373,7 +404,7 @@ class P4Driver(_Driver):
         if P4_ATTR_DEV_P4INFO in self.__settings:
             self.__p4info_path = self.__settings.get(P4_ATTR_DEV_P4INFO)
             assert os.path.exists(self.__p4info_path),\
-                "Invalid path to p4info file"
+                "Invalid path to p4info file: {}".format(self.__p4info_path)
             assert P4_ATTR_DEV_P4BIN in self.__settings,\
                 "p4info and p4bin settings must be provided together"
 
@@ -404,7 +435,7 @@ class P4Driver(_Driver):
         """
         resources = []
 
-        LOGGER.debug("GetConfig() -> Keys: %s", resource_keys)
+        LOGGER.info("GetConfig() -> Keys: {}".format(resource_keys))
 
         for resource_key in resource_keys:
             entries = []
@@ -423,8 +454,7 @@ class P4Driver(_Driver):
                             entries.append(c_entries)
                 elif KEY_DIR_COUNTER == resource_key:
                     for d_cnt_name in self.__manager.get_direct_counter_names():
-                        dc_entries = \
-                            self.__manager.direct_counter_entries_to_json(
+                        dc_entries = self.__manager.direct_counter_entries_to_json(
                                 d_cnt_name)
                         if dc_entries:
                             entries.append(dc_entries)
@@ -436,28 +466,35 @@ class P4Driver(_Driver):
                             entries.append(m_entries)
                 elif KEY_DIR_METER == resource_key:
                     for d_meter_name in self.__manager.get_direct_meter_names():
-                        dm_entries = \
-                            self.__manager.direct_meter_entries_to_json(
+                        dm_entries = self.__manager.direct_meter_entries_to_json(
                                 d_meter_name)
                         if dm_entries:
                             entries.append(dm_entries)
                 elif KEY_ACTION_PROFILE == resource_key:
                     for ap_name in self.__manager.get_action_profile_names():
-                        ap_entries = \
-                            self.__manager.action_prof_member_entries_to_json(
+                        ap_entries = self.__manager.action_prof_member_entries_to_json(
                                 ap_name)
                         if ap_entries:
                             entries.append(ap_entries)
                 elif KEY_ACTION == resource_key:
-                    #To be implemented or deprecated
-                    pass
-                elif '__endpoints__' == resource_key:
-                    #Not Supported for P4 devices
+                    # To be implemented or deprecated
                     pass
                 elif KEY_CTL_PKT_METADATA == resource_key:
+                    #TODO: Handle controller packet metadata
                     msg = f"{resource_key.capitalize()} is not a " \
                           f"retrievable resource"
-                    raise Exception(msg)
+                    LOGGER.warning("%s", msg)
+                elif KEY_DIGEST == resource_key:
+                    #TODO: Handle digests
+                    msg = f"{resource_key.capitalize()} is not a " \
+                          f"retrievable resource"
+                    LOGGER.warning("%s", msg)
+                elif RESOURCE_ENDPOINTS == resource_key:
+                    resources += self.__endpoints
+                    continue
+                elif RESOURCE_RULES == resource_key:
+                    resources += self.__rules_into_resources()
+                    continue
                 else:
                     msg = f"GetConfig failed due to invalid " \
                           f"resource key: {resource_key}"
@@ -465,8 +502,10 @@ class P4Driver(_Driver):
                 resources.append(
                     (resource_key, entries if entries else None)
                 )
-            except Exception as ex:  # pylint: disable=broad-except
-                resources.append((resource_key, ex))
+            except Exception as e:  # pylint: disable=broad-except
+                resources.append((resource_key, e))
+
+        LOGGER.info("GetConfig() -> Results: %s", resources)
 
         return resources
 
@@ -480,6 +519,8 @@ class P4Driver(_Driver):
         """
         results = []
 
+        LOGGER.info("SetConfig -> Resources {}".format(resources))
+
         for i, resource in enumerate(resources):
             str_resource_name = f"resources[#{i}]"
             resource_key = ""
@@ -499,11 +540,15 @@ class P4Driver(_Driver):
                 continue
 
             try:
-                resource_value = json.loads(resource_value)
-            except Exception:  # pylint: disable=broad-except
-                pass
+                # Rules are JSON-based, endpoints are not
+                if "endpoint" not in resource_key:
+                    resource_value = json.loads(resource_value)
+            except Exception as e:  # pylint: disable=broad-except
+                LOGGER.exception("Exception validating resource value {}".format(resource_value))
+                results.append(e)
+                continue
 
-            LOGGER.debug(
+            LOGGER.info(
                 "SetConfig() -> Key: %s - Value: %s",
                 resource_key, resource_value)
 
@@ -512,13 +557,22 @@ class P4Driver(_Driver):
             # to be inserted already exists, thus simply needs an update.
             operation = WriteOperation.insert
 
+            # Dataplane and cache rule insertion process
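+            # r1: rule cached, r2: rule applied to the dataplane, r3: cache rollback
+            # (only attempted when the dataplane write fails); the overall result is
+            # True only if the rule ends up both cached and applied.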
             try:
-                self.__apply_operation(resource_key, resource_value, operation)
-                results.append(True)
-            except Exception as ex:  # pylint: disable=broad-except
-                results.append(ex)
+                r2, r3 = False, True
+                r1 = self.__cache_rule_insert(resource_key, resource_value, operation)
+                # Cache insertion succeeded, proceed to dataplane
+                if r1:
+                    r2 = self.__apply_operation(resource_key, resource_value, operation)
+                # Dataplane insertion did not succeed --> Revert caching
+                if not r2 and r1:
+                    r3 = self.__cache_rule_remove(resource_key)
+                results.append(r1 & r2 & r3)
+            except Exception as e:  # pylint: disable=broad-except
+                results.append(e)
+                continue
 
-        print(results)
+        LOGGER.info("SetConfig() -> Results: {}".format(results))
 
         return results
 
@@ -552,21 +606,31 @@ class P4Driver(_Driver):
 
             try:
                 resource_value = json.loads(resource_value)
-            except Exception:  # pylint: disable=broad-except
-                pass
+            except Exception as e:  # pylint: disable=broad-except
+                results.append(e)
+                continue
 
-            LOGGER.debug("DeleteConfig() -> Key: %s - Value: %s",
+            LOGGER.info("DeleteConfig() -> Key: %s - Value: %s",
                          resource_key, resource_value)
 
             operation = WriteOperation.delete
 
+            # Dataplane and cache rule removal process
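+            # r1: rule removed from cache, r2: rule removed from the dataplane,
+            # r3: cache re-insert (only attempted when the dataplane removal fails).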
             try:
-                self.__apply_operation(resource_key, resource_value, operation)
-                results.append(True)
-            except Exception as ex:  # pylint: disable=broad-except
-                results.append(ex)
+                r2, r3 = False, True
+                r1 = self.__cache_rule_remove(resource_key)
+                # Cache removal succeeded, proceed to dataplane
+                if r1:
+                    r2 = self.__apply_operation(resource_key, resource_value, operation)
+                # Dataplane removal did not succeed --> Revert caching
+                if not r2 and r1:
+                    r3 = self.__cache_rule_insert(resource_key, resource_value, WriteOperation.insert)
+                results.append(r1 & r2 & r3)
+            except Exception as e:  # pylint: disable=broad-except
+                results.append(e)
+                continue
 
-        print(results)
+        LOGGER.info("DeleteConfig() -> Results: {}".format(results))
 
         return results
 
@@ -583,35 +647,85 @@ class P4Driver(_Driver):
         """
 
         # Apply settings to the various tables
-        if KEY_TABLE == resource_key:
+        if KEY_TABLE in resource_key:
             self.__manager.table_entry_operation_from_json(
                 resource_value, operation)
-        elif KEY_COUNTER == resource_key:
+        # 'counter' is a substring of 'direct_counter'; do not swallow direct-counter keys
+        elif KEY_COUNTER in resource_key and KEY_DIR_COUNTER not in resource_key:
             self.__manager.counter_entry_operation_from_json(
                 resource_value, operation)
-        elif KEY_DIR_COUNTER == resource_key:
+        elif KEY_DIR_COUNTER in resource_key:
             self.__manager.direct_counter_entry_operation_from_json(
                 resource_value, operation)
-        elif KEY_METER == resource_key:
+        # 'meter' is a substring of 'direct_meter'; do not swallow direct-meter keys
+        elif KEY_METER in resource_key and KEY_DIR_METER not in resource_key:
             self.__manager.meter_entry_operation_from_json(
                 resource_value, operation)
-        elif KEY_DIR_METER == resource_key:
+        elif KEY_DIR_METER in resource_key:
             self.__manager.direct_meter_entry_operation_from_json(
                 resource_value, operation)
-        elif KEY_ACTION_PROFILE == resource_key:
+        elif KEY_ACTION_PROFILE in resource_key:
             self.__manager.action_prof_member_entry_operation_from_json(
                 resource_value, operation)
             self.__manager.action_prof_group_entry_operation_from_json(
                 resource_value, operation)
-        elif KEY_CTL_PKT_METADATA == resource_key:
+        elif KEY_CLONE_SESSION in resource_key:
+            self.__manager.clone_session_entry_operation_from_json(
+                resource_value, operation)
+        elif KEY_CTL_PKT_METADATA in resource_key:
             msg = f"{resource_key.capitalize()} is not a " \
                   f"configurable resource"
             raise Exception(msg)
+        elif KEY_DIGEST in resource_key:
+            msg = f"{resource_key.capitalize()} is not a " \
+                  f"configurable resource"
+            raise Exception(msg)
+        elif KEY_ENDPOINT in resource_key:
+            self.__endpoints.append((resource_key, resource_value))
         else:
             msg = f"{operation} on invalid key {resource_key}"
             LOGGER.error(msg)
             raise Exception(msg)
 
-        LOGGER.debug("%s operation: %s", resource_key.capitalize(), operation)
+        return True
+
+    def __cache_rule_insert(self, resource_key, resource_value, operation):
+        """
+        Insert a new rule into the rule cache or update an existing one.
+
+        :param resource_key: P4 resource key
+        :param resource_value: P4 resource value in JSON format
+        :param operation: write operation (i.e., insert, update) to apply
+        :return: True if new rule is inserted or existing is updated, otherwise False
+        """
+        if (resource_key in self.__rules) and (operation == WriteOperation.insert):
+            LOGGER.error("Attempting to insert an existing rule key: {}".format(resource_key))
+            return False
+        elif (resource_key not in self.__rules) and (operation == WriteOperation.update):
+            LOGGER.error("Attempting to update a non-existing rule key: {}".format(resource_key))
+            return False
+        elif (resource_key in self.__rules) and (operation == WriteOperation.update):
+            LOGGER.warning("Updating an existing rule key: {}".format(resource_key))
+        self.__rules[resource_key] = resource_value
+        return True
+
+    def __cache_rule_remove(self, resource_key):
+        """
+        Remove an existing rule from the rule cache.
 
+        :param resource_key: P4 resource key
+        :return: True if existing rule is removed, otherwise False
+        """
+        if resource_key not in self.__rules:
+            LOGGER.error("Attempting to remove a non-existing rule key: {}".format(resource_key))
+            return False
+        self.__rules.pop(resource_key)
         return True
+
+    def __rules_into_resources(self):
+        """
+        Transform the rules cached in the driver's rule map into
+        resources exposed through the SBI API.
+
+        :return: list of (rule key, rule value) tuples
+        """
+        return list(self.__rules.items())
diff --git a/src/device/service/drivers/p4/p4_manager.py b/src/device/service/drivers/p4/p4_manager.py
index f6684412a4d650ecb909632b9ddcbc3d17a55a5c..210422ed8de2559b56fa22da4e36f154b7d03b99 100644
--- a/src/device/service/drivers/p4/p4_manager.py
+++ b/src/device/service/drivers/p4/p4_manager.py
@@ -35,7 +35,8 @@ try:
     from .p4_common import encode,\
         parse_resource_string_from_json, parse_resource_integer_from_json,\
         parse_resource_bytes_from_json, parse_match_operations_from_json,\
-        parse_action_parameters_from_json, parse_integer_list_from_json
+        parse_action_parameters_from_json, parse_integer_list_from_json,\
+        parse_replicas_from_json
     from .p4_exception import UserError, InvalidP4InfoError
 except ImportError:
     from p4_client import P4RuntimeClient, P4RuntimeException,\
@@ -58,6 +59,7 @@ CONTEXT = Context()
 CLIENTS = {}
 
 # Constant P4 entities
+KEYS_P4 = []
 KEY_TABLE = "table"
 KEY_ACTION = "action"
 KEY_ACTION_PROFILE = "action_profile"
@@ -66,6 +68,11 @@ KEY_DIR_COUNTER = "direct_counter"
 KEY_METER = "meter"
 KEY_DIR_METER = "direct_meter"
 KEY_CTL_PKT_METADATA = "controller_packet_metadata"
+KEY_DIGEST = "digest"
+
+# Extra resource keys
+KEY_CLONE_SESSION = "clone_session"
+KEY_ENDPOINT = "endpoint"
 
 
 def get_context():
@@ -83,19 +90,20 @@ def get_table_type(table):
     :param table: P4 table
     :return: P4 table type
     """
-    for m_f in table.match_fields:
-        if m_f.match_type == p4info_pb2.MatchField.EXACT:
-            return p4info_pb2.MatchField.EXACT
-        if m_f.match_type == p4info_pb2.MatchField.LPM:
-            return p4info_pb2.MatchField.LPM
-        if m_f.match_type == p4info_pb2.MatchField.TERNARY:
-            return p4info_pb2.MatchField.TERNARY
-        if m_f.match_type == p4info_pb2.MatchField.RANGE:
-            return p4info_pb2.MatchField.RANGE
-        if m_f.match_type == p4info_pb2.MatchField.OPTIONAL:
-            return p4info_pb2.MatchField.OPTIONAL
-    return None
+    is_ternary = False
 
+    for m_f in table.match_fields:
+        # LPM and range are special forms of ternary
+        if m_f.match_type in [
+            p4info_pb2.MatchField.TERNARY,
+            p4info_pb2.MatchField.LPM,
+            p4info_pb2.MatchField.RANGE
+        ]:
+            is_ternary = True
+
+    if is_ternary:
+        return p4info_pb2.MatchField.TERNARY
+    return p4info_pb2.MatchField.EXACT
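+# Note: a table with at least one TERNARY, LPM, or RANGE match field is routed
+# through the ternary insertion path below (which sets a priority); otherwise
+# the exact-match path is used.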
 
 def match_type_to_str(match_type):
     """
@@ -132,12 +140,12 @@ class P4Manager:
         self.__id = device_id
         self.__ip_address = ip_address
         self.__port = int(port)
-        self.__endpoint = f"{self.__ip_address}:{self.__port}"
+        self.__grpc_endpoint = f"{self.__ip_address}:{self.__port}"
         self.key_id = ip_address+str(port)
         CLIENTS[self.key_id] = P4RuntimeClient(
-            self.__id, self.__endpoint, election_id, role_name, ssl_options)
+            self.__id, self.__grpc_endpoint, election_id, role_name, ssl_options)
         self.__p4info = None
-        
+
         self.local_client = CLIENTS[self.key_id]
 
         # Internal memory for whitebox management
@@ -146,14 +154,14 @@ class P4Manager:
 
         # | -> P4 entities
         self.table_entries = {}
+        self.action_profile_members = {}
+        self.action_profile_groups = {}
         self.counter_entries = {}
         self.direct_counter_entries = {}
         self.meter_entries = {}
         self.direct_meter_entries = {}
-        self.multicast_groups = {}
         self.clone_session_entries = {}
-        self.action_profile_members = {}
-        self.action_profile_groups = {}
+        self.multicast_groups = {}
 
     def start(self, p4bin_path, p4info_path):
         """
@@ -234,7 +242,7 @@ class P4Manager:
         self.__id = None
         self.__ip_address = None
         self.__port = None
-        self.__endpoint = None
+        self.__grpc_endpoint = None
         self.__clear_state()
 
     def __clear_state(self):
@@ -244,14 +252,14 @@ class P4Manager:
         :return: void
         """
         self.table_entries.clear()
+        self.action_profile_members.clear()
+        self.action_profile_groups.clear()
         self.counter_entries.clear()
         self.direct_counter_entries.clear()
         self.meter_entries.clear()
         self.direct_meter_entries.clear()
-        self.multicast_groups.clear()
         self.clone_session_entries.clear()
-        self.action_profile_members.clear()
-        self.action_profile_groups.clear()
+        self.multicast_groups.clear()
         self.p4_objects.clear()
 
     def __init_objects(self):
@@ -264,7 +272,7 @@ class P4Manager:
         global KEY_TABLE, KEY_ACTION, KEY_ACTION_PROFILE, \
             KEY_COUNTER, KEY_DIR_COUNTER, \
             KEY_METER, KEY_DIR_METER, \
-            KEY_CTL_PKT_METADATA
+            KEY_CTL_PKT_METADATA, KEY_DIGEST, KEYS_P4
 
         KEY_TABLE = P4Type.table.name
         KEY_ACTION = P4Type.action.name
@@ -274,12 +282,15 @@ class P4Manager:
         KEY_METER = P4Type.meter.name
         KEY_DIR_METER = P4Type.direct_meter.name
         KEY_CTL_PKT_METADATA = P4Type.controller_packet_metadata.name
-        assert (k for k in [
+        KEY_DIGEST = P4Type.digest.name
+
+        KEYS_P4 = [
             KEY_TABLE, KEY_ACTION, KEY_ACTION_PROFILE,
             KEY_COUNTER, KEY_DIR_COUNTER,
             KEY_METER, KEY_DIR_METER,
-            KEY_CTL_PKT_METADATA
-        ])
+            KEY_CTL_PKT_METADATA, KEY_DIGEST
+        ]
+        assert all(KEYS_P4)  # a bare generator is always truthy; check the keys themselves
 
         if not self.p4_objects:
             LOGGER.warning(
@@ -292,6 +303,11 @@ class P4Manager:
             for table in self.p4_objects[KEY_TABLE]:
                 self.table_entries[table.name] = []
 
+        if KEY_ACTION_PROFILE in self.p4_objects:
+            for act_prof in self.p4_objects[KEY_ACTION_PROFILE]:
+                self.action_profile_members[act_prof.name] = []
+                self.action_profile_groups[act_prof.name] = []
+
         if KEY_COUNTER in self.p4_objects:
             for cnt in self.p4_objects[KEY_COUNTER]:
                 self.counter_entries[cnt.name] = []
@@ -308,11 +324,6 @@ class P4Manager:
             for d_meter in self.p4_objects[KEY_DIR_METER]:
                 self.direct_meter_entries[d_meter.name] = []
 
-        if KEY_ACTION_PROFILE in self.p4_objects:
-            for act_prof in self.p4_objects[KEY_ACTION_PROFILE]:
-                self.action_profile_members[act_prof.name] = []
-                self.action_profile_groups[act_prof.name] = []
-
     def __discover_objects(self):
         """
         Discover and store all P4 objects.
@@ -509,6 +520,20 @@ class P4Manager:
                 return pkt_meta
         return None
 
+    def get_digest(self, digest_name):
+        """
+        Get a digest object by name.
+
+        :param digest_name: name of a digest object
+        :return: digest object or None
+        """
+        if KEY_DIGEST not in self.p4_objects:
+            return None
+        for dg in self.p4_objects[KEY_DIGEST]:
+            if dg.name == digest_name:
+                return dg
+        return None
+
     def get_resource_keys(self):
         """
         Retrieve the available P4 resource keys.
@@ -561,15 +586,15 @@ class P4Manager:
         self.table_entries[table_name] = []
 
         try:
-            for count, table_entry in enumerate(
-                    TableEntry(self.local_client, table_name)(action=action_name).read()):
-                LOGGER.debug(
-                    "Table %s - Entry %d\n%s", table_name, count, table_entry)
+            assert self.local_client
+            entries = TableEntry(self.local_client, table_name).read()
+            for table_entry in entries:
                 self.table_entries[table_name].append(table_entry)
             return self.table_entries[table_name]
         except P4RuntimeException as ex:
-            LOGGER.error(ex)
-            return []
+            LOGGER.error("Failed to get table %s entries: %s",
+                         table_name, str(ex))
+        return []
 
     def table_entries_to_json(self, table_name):
         """
@@ -634,10 +659,14 @@ class P4Manager:
         :return: number of P4 table entries or negative integer
         upon missing table
         """
-        entries = self.get_table_entries(table_name, action_name)
-        if entries is None:
-            return -1
-        return len(entries)
+        count = 0
+        try:
+            entries = TableEntry(self.local_client, table_name).read()
+            count = sum(1 for _ in entries)
+        except Exception as e:  # pylint: disable=broad-except
+            LOGGER.error("Failed to read entries of table %s: %s", table_name, str(e))
+
+        return count
 
     def count_table_entries_all(self):
         """
@@ -675,7 +704,7 @@ class P4Manager:
         metadata = parse_resource_bytes_from_json(json_resource, "metadata")
 
         if operation in [WriteOperation.insert, WriteOperation.update]:
-            LOGGER.debug("Table entry to insert/update: %s", json_resource)
+            LOGGER.info("Table entry to insert/update: %s", json_resource)
             return self.insert_table_entry(
                 table_name=table_name,
                 match_map=match_map,
@@ -685,7 +714,7 @@ class P4Manager:
                 metadata=metadata if metadata else None
             )
         if operation == WriteOperation.delete:
-            LOGGER.debug("Table entry to delete: %s", json_resource)
+            LOGGER.info("Table entry to delete: %s", json_resource)
             return self.delete_table_entry(
                 table_name=table_name,
                 match_map=match_map,
@@ -700,7 +729,7 @@ class P4Manager:
             cnt_pkt=-1, cnt_byte=-1):
         """
         Insert an entry into an exact match table.
-    
+
         :param table_name: P4 table name
         :param match_map: Map of match operations
         :param action_name: Action name
@@ -712,45 +741,45 @@ class P4Manager:
         """
         assert match_map, "Table entry without match operations is not accepted"
         assert action_name, "Table entry without action is not accepted"
-    
+
         table_entry = TableEntry(self.local_client, table_name)(action=action_name)
-    
+
         for match_k, match_v in match_map.items():
             table_entry.match[match_k] = match_v
-    
+
         for action_k, action_v in action_params.items():
             table_entry.action[action_k] = action_v
-    
+
         if metadata:
             table_entry.metadata = metadata
-    
+
         if cnt_pkt > 0:
             table_entry.counter_data.packet_count = cnt_pkt
-    
+
         if cnt_byte > 0:
             table_entry.counter_data.byte_count = cnt_byte
-    
+
         ex_msg = ""
         try:
             table_entry.insert()
             LOGGER.info("Inserted exact table entry: %s", table_entry)
         except (P4RuntimeException, P4RuntimeWriteException) as ex:
-            raise P4RuntimeException from ex
-    
+            ex_msg = str(ex)
+            LOGGER.warning(ex)
+
         # Table entry exists, needs to be modified
         if "ALREADY_EXISTS" in ex_msg:
             table_entry.modify()
             LOGGER.info("Updated exact table entry: %s", table_entry)
-    
+
         return table_entry
-    
-    
+
     def insert_table_entry_ternary(self,
             table_name, match_map, action_name, action_params, metadata,
             priority, cnt_pkt=-1, cnt_byte=-1):
         """
         Insert an entry into a ternary match table.
-    
+
         :param table_name: P4 table name
         :param match_map: Map of match operations
         :param action_name: Action name
@@ -763,47 +792,47 @@ class P4Manager:
         """
         assert match_map, "Table entry without match operations is not accepted"
         assert action_name, "Table entry without action is not accepted"
-    
+
         table_entry = TableEntry(self.local_client, table_name)(action=action_name)
-    
+
         for match_k, match_v in match_map.items():
             table_entry.match[match_k] = match_v
-    
+
         for action_k, action_v in action_params.items():
             table_entry.action[action_k] = action_v
-    
+
         table_entry.priority = priority
-    
+
         if metadata:
             table_entry.metadata = metadata
-    
+
         if cnt_pkt > 0:
             table_entry.counter_data.packet_count = cnt_pkt
-    
+
         if cnt_byte > 0:
             table_entry.counter_data.byte_count = cnt_byte
-    
+
         ex_msg = ""
         try:
             table_entry.insert()
             LOGGER.info("Inserted ternary table entry: %s", table_entry)
         except (P4RuntimeException, P4RuntimeWriteException) as ex:
-            raise P4RuntimeException from ex
-    
+            ex_msg = str(ex)
+            LOGGER.warning(ex)
+
         # Table entry exists, needs to be modified
         if "ALREADY_EXISTS" in ex_msg:
             table_entry.modify()
             LOGGER.info("Updated ternary table entry: %s", table_entry)
-    
+
         return table_entry
-    
-    
+
     def insert_table_entry_range(self,
             table_name, match_map, action_name, action_params, metadata,
             priority, cnt_pkt=-1, cnt_byte=-1):  # pylint: disable=unused-argument
         """
         Insert an entry into a range match table.
-    
+
         :param table_name: P4 table name
         :param match_map: Map of match operations
         :param action_name: Action name
@@ -816,17 +845,16 @@ class P4Manager:
         """
         assert match_map, "Table entry without match operations is not accepted"
         assert action_name, "Table entry without action is not accepted"
-    
+
         raise NotImplementedError(
             "Range-based table insertion not implemented yet")
-    
-    
+
     def insert_table_entry_optional(self,
             table_name, match_map, action_name, action_params, metadata,
             priority, cnt_pkt=-1, cnt_byte=-1):  # pylint: disable=unused-argument
         """
         Insert an entry into an optional match table.
-    
+
         :param table_name: P4 table name
         :param match_map: Map of match operations
         :param action_name: Action name
@@ -839,7 +867,7 @@ class P4Manager:
         """
         assert match_map, "Table entry without match operations is not accepted"
         assert action_name, "Table entry without action is not accepted"
-    
+
         raise NotImplementedError(
             "Optional-based table insertion not implemented yet")
 
@@ -869,32 +897,36 @@ class P4Manager:
         assert table, \
             "P4 pipeline does not implement table " + table_name
 
-        if not get_table_type(table):
+        table_type = get_table_type(table)
+
+        if not table_type:
             msg = f"Table {table_name} is undefined, cannot insert entry"
             LOGGER.error(msg)
             raise UserError(msg)
 
+        LOGGER.debug("Table {}: {}".format(table_name, match_type_to_str(table_type)))
+
         # Exact match is supported
-        if get_table_type(table) == p4info_pb2.MatchField.EXACT:
+        if table_type == p4info_pb2.MatchField.EXACT:
             return self.insert_table_entry_exact(
                 table_name, match_map, action_name, action_params, metadata,
                 cnt_pkt, cnt_byte)
 
         # Ternary and LPM matches are supported
-        if get_table_type(table) in \
+        if table_type in \
                 [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]:
             return self.insert_table_entry_ternary(
                 table_name, match_map, action_name, action_params, metadata,
                 priority, cnt_pkt, cnt_byte)
 
         # TODO: Cover RANGE match  # pylint: disable=W0511
-        if get_table_type(table) == p4info_pb2.MatchField.RANGE:
+        if table_type == p4info_pb2.MatchField.RANGE:
             return self.insert_table_entry_range(
                 table_name, match_map, action_name, action_params, metadata,
                 priority, cnt_pkt, cnt_byte)
 
         # TODO: Cover OPTIONAL match  # pylint: disable=W0511
-        if get_table_type(table) == p4info_pb2.MatchField.OPTIONAL:
+        if table_type == p4info_pb2.MatchField.OPTIONAL:
             return self.insert_table_entry_optional(
                 table_name, match_map, action_name, action_params, metadata,
                 priority, cnt_pkt, cnt_byte)
@@ -917,7 +949,9 @@ class P4Manager:
         assert table, \
             "P4 pipeline does not implement table " + table_name
 
-        if not get_table_type(table):
+        table_type = get_table_type(table)
+
+        if not table_type:
             msg = f"Table {table_name} is undefined, cannot delete entry"
             LOGGER.error(msg)
             raise UserError(msg)
@@ -930,7 +964,7 @@ class P4Manager:
         for action_k, action_v in action_params.items():
             table_entry.action[action_k] = action_v
 
-        if get_table_type(table) in \
+        if table_type in \
                 [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]:
             if priority == 0:
                 msg = f"Table {table_name} is ternary, priority must be != 0"
@@ -938,15 +972,25 @@ class P4Manager:
                 raise UserError(msg)
 
         # TODO: Ensure correctness of RANGE & OPTIONAL  # pylint: disable=W0511
-        if get_table_type(table) in \
+        if table_type in \
                 [p4info_pb2.MatchField.RANGE, p4info_pb2.MatchField.OPTIONAL]:
             raise NotImplementedError(
                 "Range and optional-based table deletion not implemented yet")
 
         table_entry.priority = priority
 
-        table_entry.delete()
-        LOGGER.info("Deleted entry %s from table: %s", table_entry, table_name)
+        ex_msg = ""
+        try:
+            table_entry.delete()
+            LOGGER.info("Deleted entry %s from table: %s", table_entry, table_name)
+        except (P4RuntimeException, P4RuntimeWriteException) as ex:
+            ex_msg = str(ex)
+            LOGGER.warning(ex)
+
+        # Table entry not found: it was likely modified in place earlier, so there is no original entry left to remove
+        if "NOT_FOUND" in ex_msg:
+            # TODO: No way to discriminate between a modified entry and an actual table miss
+            LOGGER.warning("Table entry was initially modified, thus cannot be removed: %s", table_entry)
 
         return table_entry
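Note: the insert hunk above (ALREADY_EXISTS, then modify) and this delete hunk (NOT_FOUND, then warn and continue) are two halves of the same tolerant-write pattern: attempt the P4Runtime write, capture the status text, and downgrade the expected conflict to a recoverable case. A minimal sketch of that pattern factored out, assuming the insert()/modify() entry API used throughout this file:

```python
# Hedged sketch, not code from this patch: the tolerant-write pattern used
# above, factored into a helper. `entry` is assumed to expose the
# insert()/modify() methods of this file's p4runtime entry wrappers.
def tolerant_insert(entry, logger):
    try:
        entry.insert()
        logger.info("Inserted entry: %s", entry)
    except (P4RuntimeException, P4RuntimeWriteException) as ex:
        if "ALREADY_EXISTS" not in str(ex):
            raise  # a genuine write failure, not a duplicate entry
        entry.modify()  # entry already present: update it in place
        logger.info("Updated existing entry: %s", entry)
    return entry
```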
 
@@ -1172,7 +1216,8 @@ class P4Manager:
                 self.counter_entries[cnt_name].append(cnt_entry)
             return self.counter_entries[cnt_name]
         except P4RuntimeException as ex:
-            LOGGER.error(ex)
+            LOGGER.error("Failed to get counter %s entries: %s",
+                         cnt_name, str(ex))
             return []
 
     def counter_entries_to_json(self, cnt_name):
@@ -1620,7 +1665,8 @@ class P4Manager:
                 self.meter_entries[meter_name].append(meter_entry)
             return self.meter_entries[meter_name]
         except P4RuntimeException as ex:
-            LOGGER.error(ex)
+            LOGGER.error("Failed to get meter %s entries: %s",
+                         meter_name, str(ex))
             return []
 
     def meter_entries_to_json(self, meter_name):
@@ -1852,7 +1898,8 @@ class P4Manager:
                 self.direct_meter_entries[d_meter_name].append(d_meter_entry)
             return self.direct_meter_entries[d_meter_name]
         except P4RuntimeException as ex:
-            LOGGER.error(ex)
+            LOGGER.error("Failed to get direct meter %s entries: %s",
+                         d_meter_name, str(ex))
             return []
 
     def direct_meter_entries_to_json(self, d_meter_name):
@@ -2094,7 +2141,8 @@ class P4Manager:
                 self.action_profile_members[ap_name].append(ap_entry)
             return self.action_profile_members[ap_name]
         except P4RuntimeException as ex:
-            LOGGER.error(ex)
+            LOGGER.error("Failed to get action profile member %s entries: %s",
+                         ap_name, str(ex))
             return []
 
     def action_prof_member_entries_to_json(self, ap_name):
@@ -2357,7 +2405,8 @@ class P4Manager:
                 self.action_profile_groups[ap_name].append(ap_entry)
             return self.action_profile_groups[ap_name]
         except P4RuntimeException as ex:
-            LOGGER.error(ex)
+            LOGGER.error("Failed to get action profile group %s entries: %s",
+                         ap_name, str(ex))
             return []
 
     def count_action_prof_group_entries(self, ap_name):
@@ -2880,14 +2929,13 @@ class P4Manager:
             json_resource, "session-id")
 
         if operation in [WriteOperation.insert, WriteOperation.update]:
-            ports = parse_integer_list_from_json(
-                json_resource, "ports", "port")
+            replicas = parse_replicas_from_json(json_resource)
 
             LOGGER.debug(
                 "Clone session entry to insert/update: %s", json_resource)
             return self.insert_clone_session_entry(
                 session_id=session_id,
-                ports=ports
+                replicas=replicas
             )
         if operation == WriteOperation.delete:
             LOGGER.debug(
@@ -2897,22 +2945,24 @@ class P4Manager:
             )
         return None
 
-    def insert_clone_session_entry(self, session_id, ports):
+    def insert_clone_session_entry(self, session_id, replicas):
         """
         Insert a new clone session.
 
         :param session_id: id of a clone session
-        :param ports: list of egress ports to clone session
+        :param replicas: dictionary mapping egress ports to replica instances
         :return: inserted clone session
         """
-        assert session_id > 0, \
-            "Clone session " + session_id + " must be > 0"
+        assert session_id > 0, \
+            f"Clone session {session_id} must be > 0"
-        assert ports, \
-            "No clone session ports are provided"
+        assert replicas, \
+            "No clone session replicas are provided"
+        assert isinstance(replicas, dict), \
+            "Clone session replicas must be a dictionary"
 
         session = CloneSessionEntry(self.local_client, session_id)
-        for p in ports:
-            session.add(p, 1)
+        for eg_port, instance in replicas.items():
+            session.add(eg_port, instance)
 
         ex_msg = ""
         try:
@@ -2943,12 +2993,15 @@ class P4Manager:
             "Clone session " + session_id + " must be > 0"
 
         session = CloneSessionEntry(self.local_client, session_id)
-        session.delete()
+
+        try:
+            session.delete()
+            LOGGER.info("Deleted clone session %d", session_id)
+        except (P4RuntimeException, P4RuntimeWriteException) as ex:
+            LOGGER.error("Failed to delete clone session %d: %s",
+                         session_id, str(ex))
 
         if session_id in self.clone_session_entries:
             del self.clone_session_entries[session_id]
-        LOGGER.info(
-            "Deleted clone session %d", session_id)
 
         return session
 
@@ -3786,6 +3839,7 @@ class _P4EntityBase(_EntityBase):
     def __init__(self, p4_client, p4_type, entity_type, p4runtime_cls, name=None,
                  modify_only=False):
         super().__init__(p4_client, entity_type, p4runtime_cls, modify_only)
+        assert self.local_client, "No local P4 client instance"
         self._p4_type = p4_type
         if name is None:
             raise UserError(
@@ -3815,7 +3869,7 @@ class ActionProfileMember(_P4EntityBase):
     """
 
     def __init__(self, p4_client, action_profile_name=None):
-        super().__init__( p4_client,
+        super().__init__(p4_client,
             P4Type.action_profile, P4RuntimeEntity.action_profile_member,
             p4runtime_pb2.ActionProfileMember, action_profile_name)
         self.member_id = 0
@@ -3981,7 +4035,7 @@ class ActionProfileGroup(_P4EntityBase):
     """
 
     def __init__(self, p4_client, action_profile_name=None):
-        super().__init__( p4_client,
+        super().__init__(p4_client,
             P4Type.action_profile, P4RuntimeEntity.action_profile_group,
             p4runtime_pb2.ActionProfileGroup, action_profile_name)
         self.group_id = 0
@@ -5055,7 +5109,7 @@ class CounterEntry(_CounterEntryBase):
     """
 
     def __init__(self, p4_client, counter_name=None):
-        super().__init__( p4_client,
+        super().__init__(p4_client,
             P4Type.counter, P4RuntimeEntity.counter_entry,
             p4runtime_pb2.CounterEntry, counter_name,
             modify_only=True)
@@ -5115,11 +5169,11 @@ To write to the counter, use <self>.modify
 class DirectCounterEntry(_CounterEntryBase):
     """
     Direct P4 counter entry.
-    """ 
+    """
     local_client = None
 
     def __init__(self, p4_client, direct_counter_name=None):
-        super().__init__( p4_client, 
+        super().__init__(p4_client,
             P4Type.direct_counter, P4RuntimeEntity.direct_counter_entry,
             p4runtime_pb2.DirectCounterEntry, direct_counter_name,
             modify_only=True)
@@ -5213,7 +5267,7 @@ class _MeterEntryBase(_P4EntityBase):
     """
 
     def __init__(self, p4_client, *args, **kwargs):
-        super().__init__(*args, **kwargs)
+        super().__init__(p4_client, *args, **kwargs)
         self._meter_type = self._info.spec.unit
         self.index = -1
         self.cir = -1
@@ -5910,7 +5964,7 @@ class IdleTimeoutNotification():
     """
     P4 idle timeout notification.
     """
-    
+
     local_client = None
 
     def __init__(self, p4_client):
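The clone-session hunks above change the API from a flat port list to a replicas mapping. A hedged usage sketch (the manager instance, session id, and port numbers are illustrative only):

```python
# Hedged usage sketch: the new signature takes {egress_port: replica_instance}
# instead of a flat port list, making each replica's instance id explicit.
replicas = {260: 1, 261: 2}  # egress port -> replica instance
p4_manager.insert_clone_session_entry(session_id=57, replicas=replicas)

# The previous API, insert_clone_session_entry(session_id=57, ports=[260, 261]),
# hard-coded every replica instance to 1.
```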
diff --git a/src/e2e_orchestrator/service/E2EOrchestratorService.py b/src/e2e_orchestrator/service/E2EOrchestratorService.py
index 9fa7bf4bd82564c4158b5af77c0d69b0b9014289..3abef2777ba22cc63df3db1eb86ee411a4ea74c7 100644
--- a/src/e2e_orchestrator/service/E2EOrchestratorService.py
+++ b/src/e2e_orchestrator/service/E2EOrchestratorService.py
@@ -12,19 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-
 from common.Constants import ServiceNameEnum
-from common.proto.e2eorchestrator_pb2_grpc import add_E2EOrchestratorServiceServicer_to_server
 from common.Settings import get_service_port_grpc
+from common.proto.e2eorchestrator_pb2 import DESCRIPTOR as E2EORCHESTRATOR_DESCRIPTOR
+from common.proto.e2eorchestrator_pb2_grpc import add_E2EOrchestratorServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from .E2EOrchestratorServiceServicerImpl import E2EOrchestratorServiceServicerImpl
 
-LOGGER = logging.getLogger(__name__)
-
-
 class E2EOrchestratorService(GenericGrpcService):
-    def __init__(self, cls_name: str = __name__):
+    def __init__(self, cls_name: str = __name__) -> None:
         port = get_service_port_grpc(ServiceNameEnum.E2EORCHESTRATOR)
         super().__init__(port, cls_name=cls_name)
         self.e2eorchestrator_servicer = E2EOrchestratorServiceServicerImpl()
@@ -33,3 +29,5 @@ class E2EOrchestratorService(GenericGrpcService):
         add_E2EOrchestratorServiceServicer_to_server(
             self.e2eorchestrator_servicer, self.server
         )
+
+        self.add_reflection_service_name(E2EORCHESTRATOR_DESCRIPTOR, 'E2EOrchestratorService')
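The same two-line reflection registration recurs below for the Forecaster, PathComp, Service, Slice, and VNTManager services, and is what the new grpcio-reflection pin in common_requirements.in supports. A hedged sketch of what a helper like add_reflection_service_name typically wires up with the upstream library (the GenericGrpcService internals are not shown in this patch):

```python
from grpc_reflection.v1alpha import reflection

# Hedged sketch: standard grpcio-reflection setup. The proto DESCRIPTOR maps
# the short service name to its fully-qualified name.
service_names = (
    E2EORCHESTRATOR_DESCRIPTOR.services_by_name['E2EOrchestratorService'].full_name,
    reflection.SERVICE_NAME,  # expose the reflection service itself
)
reflection.enable_server_reflection(service_names, server)
```

Once enabled, the exposed services can be listed without local .proto files, e.g. `grpcurl -plaintext <host>:<port> list`.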
diff --git a/src/forecaster/service/ForecasterService.py b/src/forecaster/service/ForecasterService.py
index 5f540cdc5bc628ae1a35e5f5ebf47ead276a413c..fedb5242d616eb8eef0a9b4e3fd5308925437acd 100644
--- a/src/forecaster/service/ForecasterService.py
+++ b/src/forecaster/service/ForecasterService.py
@@ -14,6 +14,7 @@
 
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
+from common.proto.forecaster_pb2 import DESCRIPTOR as FORECASTER_DESCRIPTOR
 from common.proto.forecaster_pb2_grpc import add_ForecasterServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from .ForecasterServiceServicerImpl import ForecasterServiceServicerImpl
@@ -26,3 +27,5 @@ class ForecasterService(GenericGrpcService):
 
     def install_servicers(self):
         add_ForecasterServiceServicer_to_server(self.forecaster_servicer, self.server)
+
+        self.add_reflection_service_name(FORECASTER_DESCRIPTOR, 'ForecasterService')
diff --git a/src/pathcomp/frontend/service/PathCompService.py b/src/pathcomp/frontend/service/PathCompService.py
index c19e59e77b9b5f75f66eec07990691a76cdc36b7..6e413c14eb293f3073bfb1d80963dab58ba04308 100644
--- a/src/pathcomp/frontend/service/PathCompService.py
+++ b/src/pathcomp/frontend/service/PathCompService.py
@@ -14,8 +14,9 @@
 
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
-from common.tools.service.GenericGrpcService import GenericGrpcService
+from common.proto.pathcomp_pb2 import DESCRIPTOR as PATHCOMP_DESCRIPTOR
 from common.proto.pathcomp_pb2_grpc import add_PathCompServiceServicer_to_server
+from common.tools.service.GenericGrpcService import GenericGrpcService
 from .PathCompServiceServicerImpl import PathCompServiceServicerImpl
 
 class PathCompService(GenericGrpcService):
@@ -26,3 +27,5 @@ class PathCompService(GenericGrpcService):
 
     def install_servicers(self):
         add_PathCompServiceServicer_to_server(self.pathcomp_servicer, self.server)
+
+        self.add_reflection_service_name(PATHCOMP_DESCRIPTOR, 'PathCompService')
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
index 42635bf4ad5cfd1a1a4cb174b73d26c51576af9a..b08830332f7fc6f526a19516b120e94a1a98b232 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py
@@ -22,6 +22,8 @@ from common.tools.grpc.Tools import grpc_message_to_json_string
 DEVICE_TYPE_TO_DEEPNESS = {
     DeviceTypeEnum.EMULATED_DATACENTER.value             : 90,
     DeviceTypeEnum.DATACENTER.value                      : 90,
+    DeviceTypeEnum.EMULATED_CLIENT.value                 : 90,
+    DeviceTypeEnum.CLIENT.value                          : 90,
 
     DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value          : 80,
     DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value      : 80,
@@ -50,6 +52,8 @@ DEVICE_TYPE_TO_DEEPNESS = {
     DeviceTypeEnum.OPTICAL_TRANSPONDER.value             : 10,
     DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value          : 10,
     DeviceTypeEnum.OPTICAL_ROADM.value                   : 10,
+    DeviceTypeEnum.QKD_NODE.value                        : 10,
+    DeviceTypeEnum.OPEN_ROADM.value                      : 10,
 
     DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER.value       :  0,
     DeviceTypeEnum.NETWORK.value                         :  0, # network out of our control; always delegate
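The additions above slot the new CLIENT types into the datacenter tier (90) and QKD_NODE/OPEN_ROADM into the optical tier (10). Purely as an illustration of consuming such a ranking (this is not the actual traversal code in ResourceGroups.py), a hedged sketch:

```python
# Hedged illustration only: rank device types by deepness, deepest first;
# unknown types fall back to 0, the "always delegate" tier.
def order_by_deepness(device_type_values):
    return sorted(
        device_type_values,
        key=lambda dt: DEVICE_TYPE_TO_DEEPNESS.get(dt, 0),
        reverse=True,
    )
```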
diff --git a/src/service/service/ServiceService.py b/src/service/service/ServiceService.py
index b99826e5b0f6ab8b228c32f4c7811181c83c7198..e088a99ebcc2e1b8258500df33ed8e319202d41c 100644
--- a/src/service/service/ServiceService.py
+++ b/src/service/service/ServiceService.py
@@ -14,6 +14,7 @@
 
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
+from common.proto.service_pb2 import DESCRIPTOR as SERVICE_DESCRIPTOR
 from common.proto.service_pb2_grpc import add_ServiceServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from .ServiceServiceServicerImpl import ServiceServiceServicerImpl
@@ -27,3 +28,5 @@ class ServiceService(GenericGrpcService):
 
     def install_servicers(self):
         add_ServiceServiceServicer_to_server(self.service_servicer, self.server)
+
+        self.add_reflection_service_name(SERVICE_DESCRIPTOR, 'ServiceService')
diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py
index c8227975f3633ede1ee48cb1175e439615cb0543..277d6d7e1a574afafa138d517cfec4c644e25023 100644
--- a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py
+++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py
@@ -21,7 +21,9 @@ from service.service.service_handler_api.AnyTreeTools import TreeNode
 
 LOGGER = logging.getLogger(__name__)
 
-NETWORK_INSTANCE = 'teraflowsdn'
+#NETWORK_INSTANCE = 'teraflowsdn' # TODO: investigate; sometimes it does not create/delete static rules properly
+NETWORK_INSTANCE = 'default'
+DEFAULT_NETWORK_INSTANCE = 'default'
 
 RE_IF    = re.compile(r'^\/interface\[([^\]]+)\]$')
 RE_SUBIF = re.compile(r'^\/interface\[([^\]]+)\]\/subinterface\[([^\]]+)\]$')
@@ -108,12 +110,21 @@ class EndpointComposer:
         if self.ipv4_address is None: return []
         if self.ipv4_prefix_len is None: return []
         json_config_rule = json_config_rule_delete if delete else json_config_rule_set
-        config_rules = [
-            json_config_rule(*_network_instance_interface(
+
+        config_rules : List[Dict] = list()
+        if network_instance_name != DEFAULT_NETWORK_INSTANCE:
+            config_rules.append(json_config_rule(*_network_instance_interface(
                 network_instance_name, self.objekt.name, self.sub_interface_index
-            )),
-        ]
-        if not delete:
+            )))
+
+        if delete:
+            config_rules.extend([
+                json_config_rule(*_interface(
+                    self.objekt.name, index=self.sub_interface_index, address_ip=None,
+                    address_prefix=None, enabled=False
+                )),
+            ])
+        else:
             config_rules.extend([
                 json_config_rule(*_interface(
                     self.objekt.name, index=self.sub_interface_index, address_ip=self.ipv4_address,
@@ -128,6 +139,12 @@ class EndpointComposer:
             'address_ip'    : self.ipv4_address,
             'address_prefix': self.ipv4_prefix_len,
         }
+
+    def __str__(self):
+        data = {'uuid': self.uuid}
+        if self.objekt is not None: data['name'] = self.objekt.name
+        data.update(self.dump())
+        return json.dumps(data)
 
 class DeviceComposer:
     def __init__(self, device_uuid : str) -> None:
@@ -187,7 +204,8 @@ class DeviceComposer:
                 endpoint.ipv4_prefix_len = ipv4_prefix_len
                 endpoint.sub_interface_index = int(subif_index)
                 endpoint_ip_network = netaddr.IPNetwork('{:s}/{:d}'.format(ipv4_network, ipv4_prefix_len))
-                self.connected.add(str(endpoint_ip_network.cidr))
+                if '0.0.0.0/' not in str(endpoint_ip_network.cidr):
+                    self.connected.add(str(endpoint_ip_network.cidr))
 
             match = RE_SR.match(config_rule_custom.resource_key)
             if match is not None:
@@ -211,9 +229,9 @@ class DeviceComposer:
         if self.objekt.device_type not in SELECTED_DEVICES: return []
 
         json_config_rule = json_config_rule_delete if delete else json_config_rule_set
-        config_rules = [
-            json_config_rule(*_network_instance(network_instance_name, 'L3VRF'))
-        ]
+        config_rules : List[Dict] = list()
+        if network_instance_name != DEFAULT_NETWORK_INSTANCE:
+            config_rules.append(json_config_rule(*_network_instance(network_instance_name, 'L3VRF')))
         for endpoint in self.endpoints.values():
             config_rules.extend(endpoint.get_config_rules(network_instance_name, delete=delete))
         if len(self.static_routes) > 0:
@@ -240,6 +258,12 @@ class DeviceComposer:
             'static_routes' : self.static_routes,
         }
 
+    def __str__(self):
+        data = {'uuid': self.uuid}
+        if self.objekt is not None: data['name'] = self.objekt.name
+        data.update(self.dump())
+        return json.dumps(data)
+
 class ConfigRuleComposer:
     def __init__(self) -> None:
         self.objekt : Optional[Service] = None
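The new __str__ methods emit JSON, matching the json.dumps-based debug logging introduced in the handler below. A hedged usage sketch (the uuid value is illustrative):

```python
# Hedged usage sketch: because __str__ serializes to JSON, composers can be
# logged directly and parsed back for assertions in tests.
import json, logging

LOGGER = logging.getLogger(__name__)

device = DeviceComposer('dev-uuid-1234')  # illustrative uuid
LOGGER.debug('device=%s', device)         # rendered via __str__
data = json.loads(str(device))            # round-trips as a dict
```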
diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py
index 8aa3781a4c3c0d238d38491fb31d8dfdf9102368..4099675fa57df11b11302c210113189f0153b599 100644
--- a/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py
+++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py
@@ -65,8 +65,9 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
 
             self.__endpoint_map[(device_uuid, endpoint_uuid)] = (device_obj.name, endpoint_obj.name)
 
+        LOGGER.debug('[pre] config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
         self.__static_route_generator.compose(endpoints)
-        LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
+        LOGGER.debug('[post] config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
 
     def _do_configurations(
         self, config_rules_per_device : Dict[str, List[Dict]], endpoints : List[Tuple[str, str, Optional[str]]],
@@ -110,8 +111,8 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
         #network_instance_name = service_uuid.split('-')[0]
         #config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=False)
         config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=False)
-        LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
-        results = self._do_configurations(config_rules_per_device, endpoints)
+        LOGGER.debug('config_rules_per_device={:s}'.format(json.dumps(config_rules_per_device)))
+        results = self._do_configurations(config_rules_per_device, endpoints, delete=False)
         LOGGER.debug('results={:s}'.format(str(results)))
         return results
 
@@ -128,7 +129,7 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
         #network_instance_name = service_uuid.split('-')[0]
         #config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=True)
         config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=True)
-        LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
+        LOGGER.debug('config_rules_per_device={:s}'.format(json.dumps(config_rules_per_device)))
         results = self._do_configurations(config_rules_per_device, endpoints, delete=True)
         LOGGER.debug('results={:s}'.format(str(results)))
         return results
diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/StaticRouteGenerator.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/StaticRouteGenerator.py
index 201f22e637556eee9e5e78c83db23b5e3d56c85f..b315c7f4d44c7f806a68b8466c393c1668d1d3bb 100644
--- a/src/service/service/service_handlers/l3nm_gnmi_openconfig/StaticRouteGenerator.py
+++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/StaticRouteGenerator.py
@@ -63,12 +63,20 @@ class StaticRouteGenerator:
     def _compute_link_endpoints(
         self, connection_hop_list : List[Tuple[str, str, Optional[str]]]
     ) -> List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]]:
+        # connection_hop_list may contain repeated endpoints; drop duplicates here while preserving order.
+        added_connection_hops = set()
+        filtered_connection_hop_list = list()
+        for connection_hop in connection_hop_list:
+            if connection_hop in added_connection_hops: continue
+            filtered_connection_hop_list.append(connection_hop)
+            added_connection_hops.add(connection_hop)
+        connection_hop_list = filtered_connection_hop_list
+
         num_connection_hops = len(connection_hop_list)
         if num_connection_hops % 2 != 0: raise Exception('Number of connection hops must be even')
         if num_connection_hops < 4: raise Exception('Number of connection hops must be >= 4')
 
-        # Skip service endpoints (first and last)
-        it_connection_hops = iter(connection_hop_list[1:-1])
+        it_connection_hops = iter(connection_hop_list)
         return list(zip(it_connection_hops, it_connection_hops))
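The rewrite above first drops duplicate hops order-preservingly, then pairs consecutive hops by handing one iterator to zip() twice. A worked illustration using the hop order from the updated test further below:

```python
# Hedged illustration of the pairing idiom: zip() pulls two items at a time
# from the same iterator, turning a flat hop list into per-link endpoint pairs.
hops = [
    ('core-net', 'eth1'), ('r1', 'eth10'),
    ('r1', 'eth2'),       ('r2', 'eth1'),
    ('r2', 'eth10'),      ('edge-net', 'eth1'),
]
it = iter(hops)
print(list(zip(it, it)))
# [(('core-net', 'eth1'), ('r1', 'eth10')),
#  (('r1', 'eth2'), ('r2', 'eth1')),
#  (('r2', 'eth10'), ('edge-net', 'eth1'))]
```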
 
     def _compute_link_addresses(
@@ -130,6 +138,7 @@ class StaticRouteGenerator:
             if endpoint.ipv4_address is None: continue
             ip_network = _compose_ipv4_network(endpoint.ipv4_address, endpoint.ipv4_prefix_len)
 
+            if '0.0.0.0/' in str(ip_network.cidr): continue
             device.connected.add(str(ip_network.cidr))
 
     def _compute_static_routes(
diff --git a/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py b/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py
index 22da218ab53c4a9d08c07dbf2553b9d8bbf407a8..a480f6b31884610782b15340ff1c40b6209b062d 100644
--- a/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py
+++ b/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py
@@ -19,6 +19,7 @@ from common.tools.object_factory.Connection import json_connection_id
 from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+#from service.service.service_handler_api.AnyTreeTools import TreeNode
 from service.service.service_handler_api.SettingsHandler import SettingsHandler
 from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
 from .MockTaskExecutor import MockTaskExecutor
@@ -45,6 +46,10 @@ class MockServiceHandler(_ServiceHandler):
         service_settings = self.__settings_handler.get_service_settings()
         self.__config_rule_composer.configure(self.__service, service_settings)
 
+        #prev_endpoint_obj = None
+        #prev_endpoint     = None
+        #settings_for_next = None
+        #for i,endpoint in enumerate(endpoints):
         for endpoint in endpoints:
             device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
@@ -60,8 +65,35 @@ class MockServiceHandler(_ServiceHandler):
             _endpoint = _device.get_endpoint(endpoint_obj.name)
             _endpoint.configure(endpoint_obj, endpoint_settings)
 
+            #if settings_for_next is not None:
+            #    _endpoint.configure(endpoint_obj, settings_for_next)
+            #    settings_for_next = None
+
+            #if endpoint_settings is not None and 'neighbor_address' in endpoint_settings.value:
+            #    _neighbor_settings = {'address_ip': endpoint_settings.value['neighbor_address']}
+            #
+            #    if 'address_prefix' in endpoint_settings.value:
+            #        _neighbor_settings['address_prefix'] = endpoint_settings.value['address_prefix']
+            #    elif 'prefix_length' in endpoint_settings.value:
+            #        _neighbor_settings['address_prefix'] = endpoint_settings.value['prefix_length']
+            #    else:
+            #        MSG = 'IP Address Prefix not found. Tried: address_prefix and prefix_length. endpoint_settings.value={:s}'
+            #        raise Exception(MSG.format(str(endpoint_settings.value)))
+            #
+            #    neighbor_settings = TreeNode('.')
+            #    neighbor_settings.value = _neighbor_settings
+            #    if i % 2 == 0:
+            #        # configure in next endpoint
+            #        settings_for_next = neighbor_settings
+            #    else:
+            #        # configure in previous endpoint
+            #        prev_endpoint.configure(prev_endpoint_obj, neighbor_settings)
+
             self.__endpoint_map[(device_uuid, endpoint_uuid)] = (device_obj.name, endpoint_obj.name)
 
+            #prev_endpoint = _endpoint
+            #prev_endpoint_obj = endpoint_obj
+
         self.__static_route_generator.compose(endpoints)
         LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
 
@@ -106,7 +138,7 @@ class MockServiceHandler(_ServiceHandler):
         #network_instance_name = service_uuid.split('-')[0]
         #config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=False)
         config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=False)
-        LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
+        LOGGER.debug('config_rules_per_device={:s}'.format(json.dumps(config_rules_per_device)))
         results = self._do_configurations(config_rules_per_device, endpoints)
         LOGGER.debug('results={:s}'.format(str(results)))
         return results
@@ -123,7 +155,7 @@ class MockServiceHandler(_ServiceHandler):
         #network_instance_name = service_uuid.split('-')[0]
         #config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=True)
         config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=True)
-        LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
+        LOGGER.debug('config_rules_per_device={:s}'.format(json.dumps(config_rules_per_device)))
         results = self._do_configurations(config_rules_per_device, endpoints, delete=True)
         LOGGER.debug('results={:s}'.format(str(results)))
         return results
diff --git a/src/service/tests/test_l3nm_gnmi_static_rule_gen/test_unitary_sns4sns.py b/src/service/tests/test_l3nm_gnmi_static_rule_gen/test_unitary_sns4sns.py
index 0177500e2a3963fb00b3aabc27b2aa0bcaa0f12d..64035f1bbf4bb80bb5192488bffcbb4962458617 100644
--- a/src/service/tests/test_l3nm_gnmi_static_rule_gen/test_unitary_sns4sns.py
+++ b/src/service/tests/test_l3nm_gnmi_static_rule_gen/test_unitary_sns4sns.py
@@ -37,27 +37,23 @@ SERVICE = Service(**json_service_l3nm_planned(
         json_endpoint_id(json_device_id('edge-net'), 'eth1'),
     ],
     config_rules=[
+        json_config_rule_set('/settings', {'address_families': ['IPV4'], 'mtu': 1500}),
+        json_config_rule_set('/static_routing', {}),
+
         json_config_rule_set('/device[core-net]/endpoint[eth1]/settings', {
-            'address_ip': '10.10.10.0', 'address_prefix': 24, 'index': 0
-        }),
-        json_config_rule_set('/device[r1]/endpoint[eth10]/settings', {
-            'address_ip': '10.10.10.229', 'address_prefix': 24, 'index': 0
-        }),
-        json_config_rule_set('/device[r2]/endpoint[eth10]/settings', {
-            'address_ip': '10.158.72.229', 'address_prefix': 24, 'index': 0
+            'address_ip': '10.10.10.0', 'neighbor_address': '10.10.10.229', 'address_prefix': 24, 'index': 0
         }),
         json_config_rule_set('/device[edge-net]/endpoint[eth1]/settings', {
-            'address_ip': '10.158.72.0', 'address_prefix': 24, 'index': 0
+            'address_ip': '10.158.72.0', 'neighbor_address': '10.158.72.229', 'address_prefix': 24, 'index': 0
         }),
     ]
 ))
 
 CONNECTION_ENDPOINTS : List[Tuple[str, str, Optional[str]]] = [
     #('core-net', 'int',   None),
-    ('core-net', 'eth1',  None),
-    ('r1',       'eth10', None), ('r1',       'eth2',  None),
-    ('r2',       'eth1',  None), ('r2',       'eth10', None),
-    ('edge-net', 'eth1',  None),
+    ('core-net', 'eth1',  None), ('r1',       'eth10', None),
+    ('r1',       'eth2',  None), ('r2',       'eth1',  None),
+    ('r2',       'eth10', None), ('edge-net', 'eth1',  None),
     #('edge-net', 'int',   None),
 ]
 
diff --git a/src/slice/service/SliceService.py b/src/slice/service/SliceService.py
index dc2584f82d0704080f16e83aa958ba0db6f08fe2..ac4e809762c1ffb6097506f34d519056f2bd3426 100644
--- a/src/slice/service/SliceService.py
+++ b/src/slice/service/SliceService.py
@@ -14,9 +14,10 @@
 
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
+from common.proto.slice_pb2 import DESCRIPTOR as SLICE_DESCRIPTOR
 from common.proto.slice_pb2_grpc import add_SliceServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
-from slice.service.SliceServiceServicerImpl import SliceServiceServicerImpl
+from .SliceServiceServicerImpl import SliceServiceServicerImpl
 
 class SliceService(GenericGrpcService):
     def __init__(self, cls_name: str = __name__) -> None:
@@ -26,3 +27,5 @@ class SliceService(GenericGrpcService):
 
     def install_servicers(self):
         add_SliceServiceServicer_to_server(self.slice_servicer, self.server)
+
+        self.add_reflection_service_name(SLICE_DESCRIPTOR, 'SliceService')
diff --git a/src/tests/p4-fwd-l1/tests/Objects.py b/src/tests/p4-fwd-l1/tests/Objects.py
index ba260e936805e57fb8f07761b1e5fc79cffbaf8f..83d1e23b438693f3dbe6b657d0332a4f0a9d1680 100644
--- a/src/tests/p4-fwd-l1/tests/Objects.py
+++ b/src/tests/p4-fwd-l1/tests/Objects.py
@@ -13,18 +13,12 @@
 # limitations under the License.
 
 import os
-from typing import Dict, List, Tuple
 from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
 from common.tools.object_factory.Context import json_context, json_context_id
 from common.tools.object_factory.Device import (
-    json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled,
-    json_device_connect_rules, json_device_id, json_device_p4_disabled,
-    json_device_emulated_tapi_disabled, json_device_id, json_device_packetrouter_disabled, json_device_tapi_disabled)
-from common.tools.object_factory.Service import (
-    get_service_uuid, json_service_l3nm_planned,json_service_p4_planned)
-from common.tools.object_factory.ConfigRule import (
-    json_config_rule_set, json_config_rule_delete)
-from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_ids, json_endpoints, json_endpoint_id
+    json_device_connect_rules, json_device_id, json_device_p4_disabled)
+from common.tools.object_factory.Service import get_service_uuid, json_service_p4_planned
+from common.tools.object_factory.EndPoint import json_endpoint_ids, json_endpoints
 from common.tools.object_factory.EndPoint import json_endpoint_descriptor
 from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
 from common.tools.object_factory.Topology import json_topology, json_topology_id
diff --git a/src/tests/p4-fwd-l1/tests/test_functional_bootstrap.py b/src/tests/p4-fwd-l1/tests/test_functional_bootstrap.py
index fe622c908b44ed28a5c91ca8b35b8524511de388..341799c0224794bd32e0a81a02c593bd1173eca1 100644
--- a/src/tests/p4-fwd-l1/tests/test_functional_bootstrap.py
+++ b/src/tests/p4-fwd-l1/tests/test_functional_bootstrap.py
@@ -14,14 +14,9 @@
 
 import copy, logging, pytest
 from common.Settings import get_setting
-from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
 from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Link import json_link_id
-from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
-from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import ConfigActionEnum, Context, ContextId, Device, Empty, Link, Topology, DeviceOperationalStatusEnum
+from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology, DeviceOperationalStatusEnum
 from device.client.DeviceClient import DeviceClient
 from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
 
diff --git a/src/tests/p4-fwd-l1/tests/test_functional_cleanup.py b/src/tests/p4-fwd-l1/tests/test_functional_cleanup.py
index 67934ff8a78f79252ef4b6994c3b628a8275c503..0b44f794d7201f49034c536c8988798ac9b44bc7 100644
--- a/src/tests/p4-fwd-l1/tests/test_functional_cleanup.py
+++ b/src/tests/p4-fwd-l1/tests/test_functional_cleanup.py
@@ -12,18 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy, logging, pytest
+import logging, pytest
 from common.Settings import get_setting
-from common.tests.EventTools import EVENT_REMOVE, check_events
 from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Link import json_link_id
-from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
-from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import ConfigActionEnum, ContextId, Device, DeviceId, Empty, Link, LinkId, TopologyId, DeviceOperationalStatusEnum
+from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId
 from device.client.DeviceClient import DeviceClient
-from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+from .Objects import CONTEXTS, DEVICES, LINKS, TOPOLOGIES
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
diff --git a/src/tests/p4-fwd-l1/tests/test_functional_create_service.py b/src/tests/p4-fwd-l1/tests/test_functional_create_service.py
index 9f82da129e159f02d40ee57b890409b0a5685b75..ee714c4774e417a9afb84554d4dc99fe57d96863 100644
--- a/src/tests/p4-fwd-l1/tests/test_functional_create_service.py
+++ b/src/tests/p4-fwd-l1/tests/test_functional_create_service.py
@@ -14,24 +14,16 @@
 
 import copy, logging, pytest
 from common.Settings import get_setting
-from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
-from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Device import json_device_id
-from common.tools.object_factory.Service import json_service_id
-from common.tools.object_factory.Link import json_link_id
-from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
-from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology, Service, ServiceId
+from common.proto.context_pb2 import Service
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
-from tests.p4.tests.Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, SERVICES
-from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceId,\
-    DeviceOperationalStatusEnum
+from tests.p4.tests.Objects import SERVICES
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
+
 @pytest.fixture(scope='session')
 def context_client():
     _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
diff --git a/src/tests/p4-fwd-l1/tests/test_functional_delete_service.py b/src/tests/p4-fwd-l1/tests/test_functional_delete_service.py
index 98b6a01c1be2a00003b3be267839a4513967d4f7..439a88efd3f72dbd1389d9d47494fa891ff2b2d2 100644
--- a/src/tests/p4-fwd-l1/tests/test_functional_delete_service.py
+++ b/src/tests/p4-fwd-l1/tests/test_functional_delete_service.py
@@ -14,18 +14,12 @@
 
 import copy, logging, pytest
 from common.Settings import get_setting
-from common.tests.EventTools import EVENT_REMOVE, check_events
-from common.tools.object_factory.Context import json_context_id
-from common.tools.object_factory.Device import json_device_id
 from common.tools.object_factory.Service import json_service_id
-from common.tools.object_factory.Link import json_link_id
-from common.tools.object_factory.Topology import json_topology_id
 from context.client.ContextClient import ContextClient
-from context.client.EventsCollector import EventsCollector
-from common.proto.context_pb2 import ConfigActionEnum, ContextId, Device, DeviceId, Empty, LinkId, TopologyId, Service, ServiceId, DeviceOperationalStatusEnum
+from common.proto.context_pb2 import ServiceId
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
-from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, SERVICES
+from .Objects import CONTEXT_ID, SERVICES
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
diff --git a/src/tests/sns4sns24/02-ietf-l3vpn-nbi.json b/src/tests/sns4sns24/02-ietf-l3vpn-nbi.json
index 31d7e0a6d1636af4572b10dd07cecc9a2aedeb9d..0d34cfe9adab63afc37bf4c618a5a6fc25dadbc7 100644
--- a/src/tests/sns4sns24/02-ietf-l3vpn-nbi.json
+++ b/src/tests/sns4sns24/02-ietf-l3vpn-nbi.json
@@ -11,7 +11,7 @@
                     "site-network-accesses": {
                         "site-network-access": [
                             {
-                                "site-network-access-id": "int",
+                                "site-network-access-id": "eth1",
                                 "site-network-access-type": "ietf-l3vpn-svc:multipoint",
                                 "device-reference": "core-net",
                                 "vpn-attachment": {"vpn-id": "ietf-l3vpn-edge-core", "site-role": "ietf-l3vpn-svc:spoke-role"},
@@ -48,7 +48,7 @@
                     "site-network-accesses": {
                         "site-network-access": [
                             {
-                                "site-network-access-id": "int",
+                                "site-network-access-id": "eth1",
                                 "site-network-access-type": "ietf-l3vpn-svc:multipoint",
                                 "device-reference": "edge-net",
                                 "vpn-attachment": {"vpn-id": "ietf-l3vpn-edge-core", "site-role": "ietf-l3vpn-svc:hub-role"},
diff --git a/src/vnt_manager/service/VNTManagerService.py b/src/vnt_manager/service/VNTManagerService.py
index b95ad089a454e9d73e99d9f14cbe525a6c9ca8cb..3f44c4a510245fc2ab145e35b8ff1f35e4ac0f78 100644
--- a/src/vnt_manager/service/VNTManagerService.py
+++ b/src/vnt_manager/service/VNTManagerService.py
@@ -12,19 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-
 from common.Constants import ServiceNameEnum
-from common.proto.vnt_manager_pb2_grpc import add_VNTManagerServiceServicer_to_server
 from common.Settings import get_service_port_grpc
+from common.proto.vnt_manager_pb2 import DESCRIPTOR as VNT_MANAGER_DESCRIPTOR
+from common.proto.vnt_manager_pb2_grpc import add_VNTManagerServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from .VNTManagerServiceServicerImpl import VNTManagerServiceServicerImpl
 
-LOGGER = logging.getLogger(__name__)
-
-
 class VNTManagerService(GenericGrpcService):
-    def __init__(self, cls_name: str = __name__):
+    def __init__(self, cls_name: str = __name__) -> None:
         port = get_service_port_grpc(ServiceNameEnum.VNTMANAGER)
         super().__init__(port, cls_name=cls_name)
         self.vntmanager_servicer = VNTManagerServiceServicerImpl()
@@ -33,3 +29,5 @@ class VNTManagerService(GenericGrpcService):
         add_VNTManagerServiceServicer_to_server(
             self.vntmanager_servicer, self.server
         )
+
+        self.add_reflection_service_name(VNT_MANAGER_DESCRIPTOR, 'VNTManagerService')