diff --git a/src/automation/src/main/java/eu/teraflow/automation/AutomationServiceImpl.java b/src/automation/src/main/java/eu/teraflow/automation/AutomationServiceImpl.java
index 773c99de6d94b5f8806a8a354b2371c0a6748f9f..54255cf78481bbaa1ecdd81a097a335292d81ed5 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/AutomationServiceImpl.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/AutomationServiceImpl.java
@@ -50,33 +50,34 @@ public class AutomationServiceImpl implements AutomationService {
                         device -> {
                             final var id = deviceId;
 
-                            if (!device.isEnabled()) {
-                                LOGGER.infof(MESSAGE, device);
-
-                                final var initialConfiguration =
-                                        deviceService.getInitialConfiguration(device.getDeviceId());
-
-                                device.enableDevice();
-                                LOGGER.infof("Enabled device [%s]", id);
-
-                                initialConfiguration
-                                        .subscribe()
-                                        .with(
-                                                deviceConfig -> {
-                                                    device.setDeviceConfiguration(deviceConfig);
-                                                    final var configuredDeviceIdUni = deviceService.configureDevice(device);
-
-                                                    configuredDeviceIdUni
-                                                            .subscribe()
-                                                            .with(
-                                                                    configuredDeviceId ->
-                                                                            LOGGER.infof(
-                                                                                    "Device [%s] has been enabled and configured successfully with %s.\n",
-                                                                                    id, deviceConfig));
-                                                });
-                            } else {
-                                LOGGER.infof("%s has been already enabled. Ignoring...", device);
+                            if (device.isEnabled()) {
+                                LOGGER.warnf("%s has already been enabled. Ignoring...", device);
+                                return;
                             }
+
+                            LOGGER.infof(MESSAGE, device);
+
+                            final var initialConfiguration =
+                                    deviceService.getInitialConfiguration(device.getDeviceId());
+
+                            device.enableDevice();
+                            LOGGER.infof("Enabled device [%s]", id);
+
+                            initialConfiguration
+                                    .subscribe()
+                                    .with(
+                                            deviceConfig -> {
+                                                device.setDeviceConfiguration(deviceConfig);
+                                                final var configuredDeviceIdUni = deviceService.configureDevice(device);
+
+                                                configuredDeviceIdUni
+                                                        .subscribe()
+                                                        .with(
+                                                                configuredDeviceId ->
+                                                                        LOGGER.infof(
+                                                                                "Device [%s] has been successfully enabled and configured with %s.\n",
+                                                                                id, deviceConfig));
+                                            });
                         });
 
         return deserializedDeviceUni;
@@ -92,13 +93,23 @@ public class AutomationServiceImpl implements AutomationService {
                         device -> {
                             final var id = deviceId;
 
+                            if (device.isDisabled()) {
+                                LOGGER.warnf("%s has already been disabled. Ignoring...", device);
+                                return;
+                            }
+
+                            device.disableDevice();
+                            LOGGER.infof("Disabled device [%s]", id);
+
                             LOGGER.infof(MESSAGE, device);
 
                             final var empty = deviceService.deleteDevice(device.getDeviceId());
 
                             empty
                                     .subscribe()
-                                    .with(emptyMessage -> LOGGER.infof("Device [%s] has been deleted.\n", id));
+                                    .with(
+                                            emptyMessage ->
+                                                    LOGGER.infof("Device [%s] has been successfully deleted.\n", id));
                         });
 
         return deserializedDeviceUni;
@@ -114,6 +125,11 @@ public class AutomationServiceImpl implements AutomationService {
                         device -> {
                             final var id = deviceId;
 
+                            if (!device.isEnabled()) {
+                                LOGGER.warnf("Cannot update disabled device %s. Ignoring...", device);
+                                return;
+                            }
+
                             LOGGER.infof(MESSAGE, device);
                             device.setDeviceConfiguration(deviceConfig);
                             final var updatedDeviceIdUni = deviceService.configureDevice(device);
@@ -123,7 +139,7 @@ public class AutomationServiceImpl implements AutomationService {
                                     .with(
                                             configuredDeviceId ->
                                                     LOGGER.infof(
-                                                            "Device [%s] has been updated successfully with %s.\n",
+                                                            "Device [%s] has been successfully updated with %s.\n",
                                                             id, deviceConfig));
                         });
 
diff --git a/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java b/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java
index c4d636b6b4dca7241808ade421f32a77861e4d3f..2fc3a3356456b3c1bc55137f686a7e82570a3171 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java
@@ -78,9 +78,11 @@ public class ContextSubscriber {
                                     automationService.deleteDevice(deviceEvent.getDeviceId());
                                     break;
                                 case UPDATE:
-                                    LOGGER.infof("Received %s for device [%s]", event, deviceId);
-                                    automationService.updateDevice(
-                                            deviceEvent.getDeviceId(), deviceEvent.getDeviceConfig().orElse(null));
+                                    LOGGER.warnf(
+                                        "Received %s for device [%s]. " +
+                                            "No automation action on an already updated device",
+                                            event, deviceId);
+                                    break;
                                 case UNDEFINED:
                                     logWarningMessage(event, deviceId, eventType);
                                     break;
diff --git a/src/automation/src/main/java/eu/teraflow/automation/context/model/Device.java b/src/automation/src/main/java/eu/teraflow/automation/context/model/Device.java
index 77bd3ca5c861713b43faf178c6450e35e6032b3c..1e5563917625a9679feb9e9491990885cc4a3c22 100644
--- a/src/automation/src/main/java/eu/teraflow/automation/context/model/Device.java
+++ b/src/automation/src/main/java/eu/teraflow/automation/context/model/Device.java
@@ -61,10 +61,18 @@ public class Device {
         return deviceOperationalStatus == DeviceOperationalStatus.ENABLED;
     }
 
+    public boolean isDisabled() {
+        return deviceOperationalStatus == DeviceOperationalStatus.DISABLED;
+    }
+
     public void enableDevice() {
         this.deviceOperationalStatus = DeviceOperationalStatus.ENABLED;
     }
 
+    public void disableDevice() {
+        this.deviceOperationalStatus = DeviceOperationalStatus.DISABLED;
+    }
+
     public String getDeviceId() {
         return deviceId;
     }
diff --git a/src/device/service/drivers/p4/p4_global_options.py b/src/device/service/drivers/p4/p4_global_options.py
index 86043b671e9316dfeff2fb12db8ab3088386382a..3457c8d556902557aa3991eee20418cf3094666c 100644
--- a/src/device/service/drivers/p4/p4_global_options.py
+++ b/src/device/service/drivers/p4/p4_global_options.py
@@ -42,7 +42,8 @@ class GlobalOptions:
     option_helpstrings = {
         Options.canonical_bytestrings: """
 Use byte-padded legacy format for binary strings sent to the P4Runtime server,
-instead of the canonical representation. See P4Runtime specification for details.
+instead of the canonical representation. See P4Runtime specification for
+details.
 """
     }
 
@@ -199,6 +200,7 @@ def make_canonical_if_option_set(bytes_):
     :return: canonical bytes
     """
 
-    if GLOBAL_OPTIONS.get_option(Options.canonical_bytestrings):
-        return to_canonical_bytes(bytes_)
+    # TODO: Fix canonical representation issue
+    # if GLOBAL_OPTIONS.get_option(Options.canonical_bytestrings):
+    #     return to_canonical_bytes(bytes_)
     return bytes_
diff --git a/src/device/service/drivers/p4/p4_manager.py b/src/device/service/drivers/p4/p4_manager.py
index dc25e80b5803bfdec7d802d41c136865f4c045e3..65f8602ea30fa2d8cd06b09655ee4ee63d045a97 100644
--- a/src/device/service/drivers/p4/p4_manager.py
+++ b/src/device/service/drivers/p4/p4_manager.py
@@ -175,9 +175,7 @@ def insert_table_entry_exact(
     try:
         table_entry.insert()
         LOGGER.info("Inserted exact table entry: %s", table_entry)
-    except P4RuntimeWriteException as ex:
-        ex_msg = str(ex)
-    except P4RuntimeException as ex:
+    except (P4RuntimeException, P4RuntimeWriteException) as ex:
         raise P4RuntimeException from ex
 
     # Table entry exists, needs to be modified
@@ -230,9 +228,7 @@ def insert_table_entry_ternary(
     try:
         table_entry.insert()
         LOGGER.info("Inserted ternary table entry: %s", table_entry)
-    except P4RuntimeWriteException as ex:
-        ex_msg = str(ex)
-    except P4RuntimeException as ex:
+    except (P4RuntimeException, P4RuntimeWriteException) as ex:
         raise P4RuntimeException from ex
 
     # Table entry exists, needs to be modified
@@ -893,10 +889,6 @@ class P4Manager:
 
         # Exact match is supported
         if get_table_type(table) == p4info_pb2.MatchField.EXACT:
-            if priority != 0:
-                msg = f"Table {table_name} is non-ternary, priority must be 0"
-                LOGGER.error(msg)
-                raise UserError(msg)
             return insert_table_entry_exact(
                 table_name, match_map, action_name, action_params, metadata,
                 cnt_pkt, cnt_byte)
@@ -904,10 +896,6 @@ class P4Manager:
         # Ternary and LPM matches are supported
         if get_table_type(table) in \
                 [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]:
-            if priority == 0:
-                msg = f"Table {table_name} is ternary, priority must be != 0"
-                LOGGER.error(msg)
-                raise UserError(msg)
             return insert_table_entry_ternary(
                 table_name, match_map, action_name, action_params, metadata,
                 priority, cnt_pkt, cnt_byte)
@@ -955,12 +943,6 @@ class P4Manager:
         for action_k, action_v in action_params.items():
             table_entry.action[action_k] = action_v
 
-        if get_table_type(table) == p4info_pb2.MatchField.EXACT:
-            if priority != 0:
-                msg = f"Table {table_name} is non-ternary, priority must be 0"
-                LOGGER.error(msg)
-                raise UserError(msg)
-
         if get_table_type(table) in \
                 [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]:
             if priority == 0:
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index bc71168f621afc9f0a9ed93d51844542beed813c..71fe14f53395e2ac57884911fe846c9c1b2c2834 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -22,7 +22,7 @@ from common.rpc_method_wrapper.ServiceExceptions import AlreadyExistsException,
 from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient
-from service.service.tools.ContextGetters import get_service
+from .tools.ContextGetters import get_service
 from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
 from .task_scheduler.TaskScheduler import TasksScheduler
 
diff --git a/src/service/service/service_handler_api/ServiceHandlerFactory.py b/src/service/service/service_handler_api/ServiceHandlerFactory.py
index 09a56775d4f391d71fe5ac30f9be74430120e306..00f9535bfaa9f152101ab14a87e413991619ba76 100644
--- a/src/service/service/service_handler_api/ServiceHandlerFactory.py
+++ b/src/service/service/service_handler_api/ServiceHandlerFactory.py
@@ -14,21 +14,23 @@
 
 import logging, operator
 from enum import Enum
-from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple
 from common.proto.context_pb2 import Device, Service
 from common.tools.grpc.Tools import grpc_message_to_json_string
-from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from .Exceptions import (
     UnsatisfiedFilterException, UnsupportedServiceHandlerClassException, UnsupportedFilterFieldException,
     UnsupportedFilterFieldValueException)
 from .FilterFields import FILTER_FIELD_ALLOWED_VALUES, FilterFieldEnum
 
+if TYPE_CHECKING:
+    from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+
 LOGGER = logging.getLogger(__name__)
 
 class ServiceHandlerFactory:
     def __init__(self, service_handlers : List[Tuple[type, List[Dict[FilterFieldEnum, Any]]]]) -> None:
         # Dict{field_name => Dict{field_value => Set{ServiceHandler}}}
-        self.__indices : Dict[str, Dict[str, Set[_ServiceHandler]]] = {}
+        self.__indices : Dict[str, Dict[str, Set['_ServiceHandler']]] = {}
 
         for service_handler_class,filter_field_sets in service_handlers:
             for filter_fields in filter_field_sets:
@@ -36,6 +38,7 @@ class ServiceHandlerFactory:
                 self.register_service_handler_class(service_handler_class, **filter_fields)
 
     def register_service_handler_class(self, service_handler_class, **filter_fields):
+        from service.service.service_handler_api._ServiceHandler import _ServiceHandler
         if not issubclass(service_handler_class, _ServiceHandler):
             raise UnsupportedServiceHandlerClassException(str(service_handler_class))
 
@@ -59,12 +62,12 @@ class ServiceHandlerFactory:
                 field_indice_service_handlers = field_indice.setdefault(field_value, set())
                 field_indice_service_handlers.add(service_handler_class)
 
-    def get_service_handler_class(self, **filter_fields) -> _ServiceHandler:
+    def get_service_handler_class(self, **filter_fields) -> '_ServiceHandler':
         supported_filter_fields = set(FILTER_FIELD_ALLOWED_VALUES.keys())
         unsupported_filter_fields = set(filter_fields.keys()).difference(supported_filter_fields)
         if len(unsupported_filter_fields) > 0: raise UnsupportedFilterFieldException(unsupported_filter_fields)
 
-        candidate_service_handler_classes : Dict[_ServiceHandler, int] = None # num. filter hits per service_handler
+        candidate_service_handler_classes : Dict['_ServiceHandler', int] = None # num. filter hits per service_handler
         for field_name, field_values in filter_fields.items():
             field_indice = self.__indices.get(field_name)
             if field_indice is None: continue
@@ -109,7 +112,7 @@ def get_common_device_drivers(drivers_per_device : List[Set[int]]) -> Set[int]:
 
 def get_service_handler_class(
     service_handler_factory : ServiceHandlerFactory, service : Service, connection_devices : Dict[str, Device]
-) -> Optional[_ServiceHandler]:
+) -> Optional['_ServiceHandler']:
 
     str_service_key = grpc_message_to_json_string(service.service_id)
 
diff --git a/src/service/service/service_handler_api/_ServiceHandler.py b/src/service/service/service_handler_api/_ServiceHandler.py
index c642afe75342309f607ed722cf78544bcfdb1ebd..a5042a504d1ade0a357ecb298a340707fe8b167e 100644
--- a/src/service/service/service_handler_api/_ServiceHandler.py
+++ b/src/service/service/service_handler_api/_ServiceHandler.py
@@ -14,7 +14,7 @@
 
 from typing import Any, List, Optional, Tuple, Union
 from common.proto.context_pb2 import Service
-from service.task_scheduler.TaskExecutor import TaskExecutor
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
 
 class _ServiceHandler:
     def __init__(self,
diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py
index 416e1698f2432e22ae5cfe8e437570fc7d3c8880..757a660590dde1b3fb2eee7090b2329cd45ec8cb 100644
--- a/src/service/service/task_scheduler/TaskExecutor.py
+++ b/src/service/service/task_scheduler/TaskExecutor.py
@@ -13,16 +13,18 @@
 # limitations under the License.
 
 from enum import Enum
-from typing import Any, Dict, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, Optional, Union
 from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId
 from common.rpc_method_wrapper.ServiceExceptions import NotFoundException
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
-from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory, get_service_handler_class
 from service.service.tools.ContextGetters import get_connection, get_device, get_service
 from service.service.tools.ObjectKeys import get_connection_key, get_device_key, get_service_key
 
+if TYPE_CHECKING:
+    from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+
 CacheableObject = Union[Connection, Device, Service]
 
 class CacheableObjectType(Enum):
@@ -136,7 +138,7 @@ class TaskExecutor:
 
     def get_service_handler(
         self, connection : Connection, service : Service, **service_handler_settings
-    ) -> _ServiceHandler:
+    ) -> '_ServiceHandler':
         connection_devices = self.get_devices_from_connection(connection)
         service_handler_class = get_service_handler_class(self._service_handler_factory, service, connection_devices)
         return service_handler_class(service, self, **service_handler_settings)
diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py
index de7e9eb7a70e683051e9d2fd906252713dcdba54..6f2bdba3e14a799e361b2543b4160b69b230d66a 100644
--- a/src/service/service/task_scheduler/TaskScheduler.py
+++ b/src/service/service/task_scheduler/TaskScheduler.py
@@ -13,12 +13,11 @@
 # limitations under the License.
 
 import graphlib, logging, queue, time
-from typing import Dict, Tuple
+from typing import TYPE_CHECKING, Dict, Tuple
 from common.proto.context_pb2 import Connection, ConnectionId, Service, ServiceId, ServiceStatusEnum
 from common.proto.pathcomp_pb2 import PathCompReply
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
-from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
 from service.service.tools.ObjectKeys import get_connection_key, get_service_key
 from .tasks._Task import _Task
 from .tasks.Task_ConnectionConfigure import Task_ConnectionConfigure
@@ -27,10 +26,13 @@ from .tasks.Task_ServiceDelete import Task_ServiceDelete
 from .tasks.Task_ServiceSetStatus import Task_ServiceSetStatus
 from .TaskExecutor import CacheableObjectType, TaskExecutor
 
+if TYPE_CHECKING:
+    from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
+
 LOGGER = logging.getLogger(__name__)
 
 class TasksScheduler:
-    def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None:
+    def __init__(self, service_handler_factory : 'ServiceHandlerFactory') -> None:
         self._dag = graphlib.TopologicalSorter()
         self._executor = TaskExecutor(service_handler_factory)
         self._tasks : Dict[str, _Task] = dict()
diff --git a/src/tests/netx22-p4/README.md b/src/tests/netx22-p4/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..51884787daf389cd256aa3f4d02298e4e630097d
--- /dev/null
+++ b/src/tests/netx22-p4/README.md
@@ -0,0 +1,73 @@
+# Network X 22 Demo - P4 driver, Basic connectivity functionality
+
+This functional test demonstrates the P4 driver with a basic connectivity test between two hosts connected to a single P4 switch, using the TeraFlow Cloud-native SDN Controller.
+
+## Functional test folder
+
+This functional test can be found in folder `src/tests/netx22-p4/`. 
+
+## P4 source and Mininet topology
+
+This test is designed to operate with a Mininet deployment that contains two hosts and a BMv2 switch; such a topology can be found in the `src/tests/netx22-p4/mininet` folder.
+Additionally, the P4 source code, along with its compiled artifacts, is present in the `src/tests/netx22-p4/p4` folder.
+
+## Deployment and Dependencies
+
+To run this functional test, it is assumed you have deployed a MicroK8s-based Kubernetes environment and a TeraFlowSDN
+controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the Python
+environment as described in
+[Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md).
+Remember to source the scenario settings appropriately, e.g., `cd ~/tfs-ctrl && source my_deploy.sh` in each terminal
+you open.
+
+Additionally, Mininet should be installed. We suggest using the Mininet packaged in the [Next-Gen SDN Tutorial](https://github.com/opennetworkinglab/ngsdn-tutorial), as it provides an easy way to deploy Mininet in Docker and comes with the stratum_bmv2 software switch.
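+
+A minimal setup sketch is shown below; it assumes Docker and `make` are available and relies on the `make deps` target documented in the ngsdn-tutorial repository to pull the required images.
+```
+# Clone the Next-Gen SDN Tutorial and pull its Docker images (Mininet, stratum_bmv2, etc.)
+git clone https://github.com/opennetworkinglab/ngsdn-tutorial
+cd ngsdn-tutorial
+make deps
+```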
+
+## Test Execution
+
+### Mininet 
+To execute this functional test, first make sure that Mininet is running the correct topology.
+If you use the Next-Gen SDN Tutorial environment, copy the topology script provided in this test's `mininet` folder into the ngsdn-tutorial `mininet` folder (a copy command is sketched below) and add the following make rule to its Makefile:
+```
+start-simple: NGSDN_TOPO_PY := topo-simple.py
+start-simple: _start
+```
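+
+For reference, copying the provided topology script into the tutorial environment could look as follows; the `~/tfs-ctrl` and `~/ngsdn-tutorial` paths are assumptions about where the two repositories were cloned.
+```
+# Copy the provided topology into the ngsdn-tutorial mininet folder (assumed paths)
+cp ~/tfs-ctrl/src/tests/netx22-p4/mininet/topo-simple.py ~/ngsdn-tutorial/mininet/
+```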
+
+After that, run:
+```
+make start-simple
+make mn-cli
+```
+You will be presented with the Mininet CLI prompt. Run the following command and let it run until the end of the experiment:
+```
+client ping server
+```
+
+### TeraFlow
+
+In another terminal, change to the TeraFlow directory and run the following:
+```
+src/tests/netx22-p4/setup.sh
+```
+This will copy the P4 artifacts into the device pod.
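+
+To verify the copy, a quick check could look like the following; the pod-name lookup mirrors `setup.sh` and assumes the `tfs` namespace.
+```
+# List the copied P4 artifacts inside the device pod
+export POD_NAME=$(kubectl get pods -n tfs | grep device | awk '{print $1}')
+kubectl exec ${POD_NAME} -n tfs -- ls /root/p4
+```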
+
+Then you can bootstrap the device to the TeraFlow Controller:
+```
+src/tests/netx22-p4/run_test_01_bootstrap.sh
+```
+
+Next, install the required rules on the P4 switch:
+```
+src/tests/netx22-p4/run_test_02_create_service.sh
+```
+Now check the Mininet terminal: the two hosts should be pinging each other as intended.
+
+You can then remove the rules from the P4 switch:
+```
+src/tests/netx22-p4/run_test_03_delete_service.sh
+```
+The two hosts in the Mininet terminal should stop pinging.
+
+Finally, remove the device from the TeraFlow Controller:
+```
+src/tests/netx22-p4/run_test_04_cleanup.sh
+```
diff --git a/src/tests/netx22-p4/__init__.py b/src/tests/netx22-p4/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/tests/netx22-p4/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/netx22-p4/deploy_specs.sh b/src/tests/netx22-p4/deploy_specs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b486474e2afad7305409bf410c7b8885b0afe2a8
--- /dev/null
+++ b/src/tests/netx22-p4/deploy_specs.sh
@@ -0,0 +1,17 @@
+# Set the URL of your local Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+export TFS_COMPONENTS="context device automation service compute monitoring webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
diff --git a/src/tests/netx22-p4/mininet/topo-simple.py b/src/tests/netx22-p4/mininet/topo-simple.py
new file mode 100755
index 0000000000000000000000000000000000000000..8a7ccaf62ecb69113e459acfbc99e020df3a02c7
--- /dev/null
+++ b/src/tests/netx22-p4/mininet/topo-simple.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+
+#  Copyright 2019-present Open Networking Foundation
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import argparse
+
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.net import Mininet
+from mininet.node import Host
+from mininet.topo import Topo
+from stratum import StratumBmv2Switch
+
+CPU_PORT = 255
+
+class IPv4Host(Host):
+    """Host that can be configured with an IPv4 gateway (default route).
+    """
+
+    def config(self, mac=None, ip=None, defaultRoute=None, lo='up', gw=None,
+               **_params):
+        super(IPv4Host, self).config(mac, ip, defaultRoute, lo, **_params)
+        self.cmd('arp -s 192.168.1.1 11:22:33:44:55:77')
+        self.cmd('ip -4 addr flush dev %s' % self.defaultIntf())
+        self.cmd('ip -6 addr flush dev %s' % self.defaultIntf())
+        self.cmd('ip -4 link set up %s' % self.defaultIntf())
+        self.cmd('ip -4 addr add %s dev %s' % (ip, self.defaultIntf()))
+        if gw:
+            self.cmd('ip -4 route add default via %s' % gw)
+        # Disable offload
+        for attr in ["rx", "tx", "sg"]:
+            cmd = "/sbin/ethtool --offload %s %s off" % (
+                self.defaultIntf(), attr)
+            self.cmd(cmd)
+
+        def updateIP():
+            return ip.split('/')[0]
+
+        self.defaultIntf().updateIP = updateIP
+
+class TutorialTopo(Topo):
+    """Basic Server-Client topology with IPv4 hosts"""
+
+    def __init__(self, *args, **kwargs):
+        Topo.__init__(self, *args, **kwargs)
+
+        # Single BMv2 Stratum switch (gRPC port 50001)
+        switch1 = self.addSwitch('switch1', cls=StratumBmv2Switch, cpuport=CPU_PORT)
+
+        # IPv4 hosts attached to switch 1
+        client = self.addHost('client', cls=IPv4Host, mac="aa:bb:cc:dd:ee:11",
+                           ip='10.0.0.1/24', gw='10.0.0.100')
+        server = self.addHost('server', cls=IPv4Host, mac="aa:bb:cc:dd:ee:22",
+                           ip='10.0.0.2/24', gw='10.0.0.100')
+        self.addLink(client, switch1)  # port 1
+        self.addLink(server, switch1)  # port 2
+
+
+def main():
+    net = Mininet(topo=TutorialTopo(), controller=None)
+    net.start()
+    client = net.hosts[0]
+    server = net.hosts[1]
+    client.setARP('10.0.0.2', 'aa:bb:cc:dd:ee:22')
+    server.setARP('10.0.0.1', 'aa:bb:cc:dd:ee:11')
+    CLI(net)
+    net.stop()
+    print('#' * 80)
+    print('ATTENTION: Mininet was stopped! Perhaps accidentally?')
+    print('No worries, it will restart automatically in a few seconds...')
+    print('To access the Mininet CLI again, use `make mn-cli`')
+    print('To detach from the CLI (without stopping), press Ctrl-D')
+    print('To permanently quit Mininet, use `make stop`')
+    print('#' * 80)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description='Mininet topology script for a single stratum_bmv2 switch with two IPv4 hosts')
+    args = parser.parse_args()
+    setLogLevel('info')
+
+    main()
diff --git a/src/tests/netx22-p4/p4/bmv2.json b/src/tests/netx22-p4/p4/bmv2.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1e3fa8ce1f6d19e9c94384d85a7ab5875a614b0
--- /dev/null
+++ b/src/tests/netx22-p4/p4/bmv2.json
@@ -0,0 +1,669 @@
+{
+  "header_types" : [
+    {
+      "name" : "scalars_0",
+      "id" : 0,
+      "fields" : [
+        ["tmp", 1, false],
+        ["local_metadata_t.is_multicast", 1, false],
+        ["_padding_0", 6, false]
+      ]
+    },
+    {
+      "name" : "standard_metadata",
+      "id" : 1,
+      "fields" : [
+        ["ingress_port", 9, false],
+        ["egress_spec", 9, false],
+        ["egress_port", 9, false],
+        ["clone_spec", 32, false],
+        ["instance_type", 32, false],
+        ["drop", 1, false],
+        ["recirculate_port", 16, false],
+        ["packet_length", 32, false],
+        ["enq_timestamp", 32, false],
+        ["enq_qdepth", 19, false],
+        ["deq_timedelta", 32, false],
+        ["deq_qdepth", 19, false],
+        ["ingress_global_timestamp", 48, false],
+        ["egress_global_timestamp", 48, false],
+        ["lf_field_list", 32, false],
+        ["mcast_grp", 16, false],
+        ["resubmit_flag", 32, false],
+        ["egress_rid", 16, false],
+        ["recirculate_flag", 32, false],
+        ["checksum_error", 1, false],
+        ["parser_error", 32, false],
+        ["priority", 3, false],
+        ["_padding", 2, false]
+      ]
+    },
+    {
+      "name" : "ethernet_t",
+      "id" : 2,
+      "fields" : [
+        ["dst_addr", 48, false],
+        ["src_addr", 48, false],
+        ["ether_type", 16, false]
+      ]
+    }
+  ],
+  "headers" : [
+    {
+      "name" : "scalars",
+      "id" : 0,
+      "header_type" : "scalars_0",
+      "metadata" : true,
+      "pi_omit" : true
+    },
+    {
+      "name" : "standard_metadata",
+      "id" : 1,
+      "header_type" : "standard_metadata",
+      "metadata" : true,
+      "pi_omit" : true
+    },
+    {
+      "name" : "ethernet",
+      "id" : 2,
+      "header_type" : "ethernet_t",
+      "metadata" : false,
+      "pi_omit" : true
+    }
+  ],
+  "header_stacks" : [],
+  "header_union_types" : [],
+  "header_unions" : [],
+  "header_union_stacks" : [],
+  "field_lists" : [],
+  "errors" : [
+    ["NoError", 1],
+    ["PacketTooShort", 2],
+    ["NoMatch", 3],
+    ["StackOutOfBounds", 4],
+    ["HeaderTooShort", 5],
+    ["ParserTimeout", 6],
+    ["ParserInvalidArgument", 7]
+  ],
+  "enums" : [],
+  "parsers" : [
+    {
+      "name" : "parser",
+      "id" : 0,
+      "init_state" : "start",
+      "parse_states" : [
+        {
+          "name" : "start",
+          "id" : 0,
+          "parser_ops" : [
+            {
+              "parameters" : [
+                {
+                  "type" : "regular",
+                  "value" : "ethernet"
+                }
+              ],
+              "op" : "extract"
+            }
+          ],
+          "transitions" : [
+            {
+              "value" : "default",
+              "mask" : null,
+              "next_state" : null
+            }
+          ],
+          "transition_key" : []
+        }
+      ]
+    }
+  ],
+  "parse_vsets" : [],
+  "deparsers" : [
+    {
+      "name" : "deparser",
+      "id" : 0,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 148,
+        "column" : 8,
+        "source_fragment" : "DeparserImpl"
+      },
+      "order" : ["ethernet"]
+    }
+  ],
+  "meter_arrays" : [],
+  "counter_arrays" : [],
+  "register_arrays" : [],
+  "calculations" : [],
+  "learn_lists" : [],
+  "actions" : [
+    {
+      "name" : "IngressPipeImpl.drop",
+      "id" : 0,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "mark_to_drop",
+          "parameters" : [
+            {
+              "type" : "header",
+              "value" : "standard_metadata"
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 77,
+            "column" : 8,
+            "source_fragment" : "mark_to_drop(standard_metadata)"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.drop",
+      "id" : 1,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "mark_to_drop",
+          "parameters" : [
+            {
+              "type" : "header",
+              "value" : "standard_metadata"
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 77,
+            "column" : 8,
+            "source_fragment" : "mark_to_drop(standard_metadata)"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.set_egress_port",
+      "id" : 2,
+      "runtime_data" : [
+        {
+          "name" : "port_num",
+          "bitwidth" : 9
+        }
+      ],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["standard_metadata", "egress_spec"]
+            },
+            {
+              "type" : "runtime_data",
+              "value" : 0
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 81,
+            "column" : 8,
+            "source_fragment" : "standard_metadata.egress_spec = port_num"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.set_egress_port",
+      "id" : 3,
+      "runtime_data" : [
+        {
+          "name" : "port_num",
+          "bitwidth" : 9
+        }
+      ],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["standard_metadata", "egress_spec"]
+            },
+            {
+              "type" : "runtime_data",
+              "value" : 0
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 81,
+            "column" : 8,
+            "source_fragment" : "standard_metadata.egress_spec = port_num"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.set_multicast_group",
+      "id" : 4,
+      "runtime_data" : [
+        {
+          "name" : "gid",
+          "bitwidth" : 16
+        }
+      ],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["standard_metadata", "mcast_grp"]
+            },
+            {
+              "type" : "runtime_data",
+              "value" : 0
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 89,
+            "column" : 8,
+            "source_fragment" : "standard_metadata.mcast_grp = gid"
+          }
+        },
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["scalars", "local_metadata_t.is_multicast"]
+            },
+            {
+              "type" : "expression",
+              "value" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "b2d",
+                  "left" : null,
+                  "right" : {
+                    "type" : "bool",
+                    "value" : true
+                  }
+                }
+              }
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 90,
+            "column" : 8,
+            "source_fragment" : "local_metadata.is_multicast = true"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "IngressPipeImpl.set_multicast_group",
+      "id" : 5,
+      "runtime_data" : [
+        {
+          "name" : "gid",
+          "bitwidth" : 16
+        }
+      ],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["standard_metadata", "mcast_grp"]
+            },
+            {
+              "type" : "runtime_data",
+              "value" : 0
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 89,
+            "column" : 8,
+            "source_fragment" : "standard_metadata.mcast_grp = gid"
+          }
+        },
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["scalars", "local_metadata_t.is_multicast"]
+            },
+            {
+              "type" : "expression",
+              "value" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "b2d",
+                  "left" : null,
+                  "right" : {
+                    "type" : "bool",
+                    "value" : true
+                  }
+                }
+              }
+            }
+          ],
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 90,
+            "column" : 8,
+            "source_fragment" : "local_metadata.is_multicast = true"
+          }
+        }
+      ]
+    },
+    {
+      "name" : "act",
+      "id" : 6,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["scalars", "tmp"]
+            },
+            {
+              "type" : "expression",
+              "value" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "b2d",
+                  "left" : null,
+                  "right" : {
+                    "type" : "bool",
+                    "value" : true
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name" : "act_0",
+      "id" : 7,
+      "runtime_data" : [],
+      "primitives" : [
+        {
+          "op" : "assign",
+          "parameters" : [
+            {
+              "type" : "field",
+              "value" : ["scalars", "tmp"]
+            },
+            {
+              "type" : "expression",
+              "value" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "b2d",
+                  "left" : null,
+                  "right" : {
+                    "type" : "bool",
+                    "value" : false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ],
+  "pipelines" : [
+    {
+      "name" : "ingress",
+      "id" : 0,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 71,
+        "column" : 8,
+        "source_fragment" : "IngressPipeImpl"
+      },
+      "init_table" : "IngressPipeImpl.l2_exact_table",
+      "tables" : [
+        {
+          "name" : "IngressPipeImpl.l2_exact_table",
+          "id" : 0,
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 95,
+            "column" : 10,
+            "source_fragment" : "l2_exact_table"
+          },
+          "key" : [
+            {
+              "match_type" : "exact",
+              "name" : "hdr.ethernet.dst_addr",
+              "target" : ["ethernet", "dst_addr"],
+              "mask" : null
+            }
+          ],
+          "match_type" : "exact",
+          "type" : "simple",
+          "max_size" : 1024,
+          "with_counters" : false,
+          "support_timeout" : false,
+          "direct_meters" : null,
+          "action_ids" : [2, 4, 0],
+          "actions" : ["IngressPipeImpl.set_egress_port", "IngressPipeImpl.set_multicast_group", "IngressPipeImpl.drop"],
+          "base_default_next" : null,
+          "next_tables" : {
+            "__HIT__" : "tbl_act",
+            "__MISS__" : "tbl_act_0"
+          },
+          "default_entry" : {
+            "action_id" : 0,
+            "action_const" : true,
+            "action_data" : [],
+            "action_entry_const" : true
+          }
+        },
+        {
+          "name" : "tbl_act",
+          "id" : 1,
+          "key" : [],
+          "match_type" : "exact",
+          "type" : "simple",
+          "max_size" : 1024,
+          "with_counters" : false,
+          "support_timeout" : false,
+          "direct_meters" : null,
+          "action_ids" : [6],
+          "actions" : ["act"],
+          "base_default_next" : "node_5",
+          "next_tables" : {
+            "act" : "node_5"
+          },
+          "default_entry" : {
+            "action_id" : 6,
+            "action_const" : true,
+            "action_data" : [],
+            "action_entry_const" : true
+          }
+        },
+        {
+          "name" : "tbl_act_0",
+          "id" : 2,
+          "key" : [],
+          "match_type" : "exact",
+          "type" : "simple",
+          "max_size" : 1024,
+          "with_counters" : false,
+          "support_timeout" : false,
+          "direct_meters" : null,
+          "action_ids" : [7],
+          "actions" : ["act_0"],
+          "base_default_next" : "node_5",
+          "next_tables" : {
+            "act_0" : "node_5"
+          },
+          "default_entry" : {
+            "action_id" : 7,
+            "action_const" : true,
+            "action_data" : [],
+            "action_entry_const" : true
+          }
+        },
+        {
+          "name" : "IngressPipeImpl.l2_ternary_table",
+          "id" : 3,
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 109,
+            "column" : 10,
+            "source_fragment" : "l2_ternary_table"
+          },
+          "key" : [
+            {
+              "match_type" : "ternary",
+              "name" : "hdr.ethernet.dst_addr",
+              "target" : ["ethernet", "dst_addr"],
+              "mask" : null
+            }
+          ],
+          "match_type" : "ternary",
+          "type" : "simple",
+          "max_size" : 1024,
+          "with_counters" : false,
+          "support_timeout" : false,
+          "direct_meters" : null,
+          "action_ids" : [3, 5, 1],
+          "actions" : ["IngressPipeImpl.set_egress_port", "IngressPipeImpl.set_multicast_group", "IngressPipeImpl.drop"],
+          "base_default_next" : null,
+          "next_tables" : {
+            "IngressPipeImpl.set_egress_port" : null,
+            "IngressPipeImpl.set_multicast_group" : null,
+            "IngressPipeImpl.drop" : null
+          },
+          "default_entry" : {
+            "action_id" : 1,
+            "action_const" : true,
+            "action_data" : [],
+            "action_entry_const" : true
+          }
+        }
+      ],
+      "action_profiles" : [],
+      "conditionals" : [
+        {
+          "name" : "node_5",
+          "id" : 0,
+          "source_info" : {
+            "filename" : "p4src/main.p4",
+            "line" : 122,
+            "column" : 12,
+            "source_fragment" : "!l2_exact_table.apply().hit"
+          },
+          "expression" : {
+            "type" : "expression",
+            "value" : {
+              "op" : "not",
+              "left" : null,
+              "right" : {
+                "type" : "expression",
+                "value" : {
+                  "op" : "d2b",
+                  "left" : null,
+                  "right" : {
+                    "type" : "field",
+                    "value" : ["scalars", "tmp"]
+                  }
+                }
+              }
+            }
+          },
+          "false_next" : null,
+          "true_next" : "IngressPipeImpl.l2_ternary_table"
+        }
+      ]
+    },
+    {
+      "name" : "egress",
+      "id" : 1,
+      "source_info" : {
+        "filename" : "p4src/main.p4",
+        "line" : 134,
+        "column" : 8,
+        "source_fragment" : "EgressPipeImpl"
+      },
+      "init_table" : null,
+      "tables" : [],
+      "action_profiles" : [],
+      "conditionals" : []
+    }
+  ],
+  "checksums" : [],
+  "force_arith" : [],
+  "extern_instances" : [],
+  "field_aliases" : [
+    [
+      "queueing_metadata.enq_timestamp",
+      ["standard_metadata", "enq_timestamp"]
+    ],
+    [
+      "queueing_metadata.enq_qdepth",
+      ["standard_metadata", "enq_qdepth"]
+    ],
+    [
+      "queueing_metadata.deq_timedelta",
+      ["standard_metadata", "deq_timedelta"]
+    ],
+    [
+      "queueing_metadata.deq_qdepth",
+      ["standard_metadata", "deq_qdepth"]
+    ],
+    [
+      "intrinsic_metadata.ingress_global_timestamp",
+      ["standard_metadata", "ingress_global_timestamp"]
+    ],
+    [
+      "intrinsic_metadata.egress_global_timestamp",
+      ["standard_metadata", "egress_global_timestamp"]
+    ],
+    [
+      "intrinsic_metadata.lf_field_list",
+      ["standard_metadata", "lf_field_list"]
+    ],
+    [
+      "intrinsic_metadata.mcast_grp",
+      ["standard_metadata", "mcast_grp"]
+    ],
+    [
+      "intrinsic_metadata.resubmit_flag",
+      ["standard_metadata", "resubmit_flag"]
+    ],
+    [
+      "intrinsic_metadata.egress_rid",
+      ["standard_metadata", "egress_rid"]
+    ],
+    [
+      "intrinsic_metadata.recirculate_flag",
+      ["standard_metadata", "recirculate_flag"]
+    ],
+    [
+      "intrinsic_metadata.priority",
+      ["standard_metadata", "priority"]
+    ]
+  ],
+  "program" : "p4src/main.p4",
+  "__meta__" : {
+    "version" : [2, 18],
+    "compiler" : "https://github.com/p4lang/p4c"
+  }
+}
\ No newline at end of file
diff --git a/src/tests/netx22-p4/p4/main.p4 b/src/tests/netx22-p4/p4/main.p4
new file mode 100644
index 0000000000000000000000000000000000000000..fb94bdfd24722862831989bad1e173858f5a0d4d
--- /dev/null
+++ b/src/tests/netx22-p4/p4/main.p4
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <core.p4>
+#include <v1model.p4>
+
+typedef bit<9>   port_num_t;
+typedef bit<48>  mac_addr_t;
+typedef bit<16>  mcast_group_id_t;
+
+//------------------------------------------------------------------------------
+// HEADER DEFINITIONS
+//------------------------------------------------------------------------------
+
+header ethernet_t {
+    mac_addr_t  dst_addr;
+    mac_addr_t  src_addr;
+    bit<16>     ether_type;
+}
+
+struct parsed_headers_t {
+    ethernet_t  ethernet;
+}
+
+struct local_metadata_t {
+    bool        is_multicast;
+}
+
+
+//------------------------------------------------------------------------------
+// INGRESS PIPELINE
+//------------------------------------------------------------------------------
+
+parser ParserImpl (packet_in packet,
+                   out parsed_headers_t hdr,
+                   inout local_metadata_t local_metadata,
+                   inout standard_metadata_t standard_metadata)
+{
+    state start {
+      transition parse_ethernet;
+    }
+
+    state parse_ethernet {
+        packet.extract(hdr.ethernet);
+        transition accept;
+    }
+}
+
+
+control VerifyChecksumImpl(inout parsed_headers_t hdr,
+                           inout local_metadata_t meta)
+{
+    apply { /* EMPTY */ }
+}
+
+
+control IngressPipeImpl (inout parsed_headers_t    hdr,
+                         inout local_metadata_t    local_metadata,
+                         inout standard_metadata_t standard_metadata) {
+
+    // Drop action shared by many tables.
+    action drop() {
+        mark_to_drop(standard_metadata);
+    }
+
+    action set_egress_port(port_num_t port_num) {
+        standard_metadata.egress_spec = port_num;
+    }
+
+    action set_multicast_group(mcast_group_id_t gid) {
+        // gid will be used by the Packet Replication Engine (PRE) in the
+        // Traffic Manager--located right after the ingress pipeline, to
+        // replicate a packet to multiple egress ports, specified by the control
+        // plane by means of P4Runtime MulticastGroupEntry messages.
+        standard_metadata.mcast_grp = gid;
+        local_metadata.is_multicast = true;
+    }
+
+    // --- l2_exact_table ------------------
+
+    table l2_exact_table {
+        key = {
+            hdr.ethernet.dst_addr: exact;
+        }
+        actions = {
+            set_egress_port;
+            set_multicast_group;
+            @defaultonly drop;
+        }
+        const default_action = drop;
+    }
+
+    // --- l2_ternary_table ------------------
+
+    table l2_ternary_table {
+        key = {
+            hdr.ethernet.dst_addr: ternary;
+        }
+        actions = {
+            set_egress_port;
+            set_multicast_group;
+            @defaultonly drop;
+        }
+        const default_action = drop;
+    }
+
+    apply {
+        if (!l2_exact_table.apply().hit) {
+            // ...if an entry is NOT found, apply the ternary one in case
+            // this is a multicast/broadcast NDP NS packet.
+            l2_ternary_table.apply();
+        }
+    }
+}
+
+//------------------------------------------------------------------------------
+// EGRESS PIPELINE
+//------------------------------------------------------------------------------
+
+control EgressPipeImpl (inout parsed_headers_t hdr,
+                        inout local_metadata_t local_metadata,
+                        inout standard_metadata_t standard_metadata) {
+    apply { /* EMPTY */ }
+}
+
+
+control ComputeChecksumImpl(inout parsed_headers_t hdr,
+                            inout local_metadata_t local_metadata)
+{
+    apply { /* EMPTY */ }
+}
+
+
+control DeparserImpl(packet_out packet, in parsed_headers_t hdr) {
+    apply {
+        packet.emit(hdr.ethernet);
+    }
+}
+
+
+V1Switch(
+    ParserImpl(),
+    VerifyChecksumImpl(),
+    IngressPipeImpl(),
+    EgressPipeImpl(),
+    ComputeChecksumImpl(),
+    DeparserImpl()
+) main;
diff --git a/src/tests/netx22-p4/p4/p4info.txt b/src/tests/netx22-p4/p4/p4info.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4048dfcd32bdb5dfd58b89b6b83ab7e1fc0188ca
--- /dev/null
+++ b/src/tests/netx22-p4/p4/p4info.txt
@@ -0,0 +1,88 @@
+pkg_info {
+  arch: "v1model"
+}
+tables {
+  preamble {
+    id: 33605373
+    name: "IngressPipeImpl.l2_exact_table"
+    alias: "l2_exact_table"
+  }
+  match_fields {
+    id: 1
+    name: "hdr.ethernet.dst_addr"
+    bitwidth: 48
+    match_type: EXACT
+  }
+  action_refs {
+    id: 16812802
+  }
+  action_refs {
+    id: 16841371
+  }
+  action_refs {
+    id: 16796182
+    annotations: "@defaultonly"
+    scope: DEFAULT_ONLY
+  }
+  const_default_action_id: 16796182
+  size: 1024
+}
+tables {
+  preamble {
+    id: 33573501
+    name: "IngressPipeImpl.l2_ternary_table"
+    alias: "l2_ternary_table"
+  }
+  match_fields {
+    id: 1
+    name: "hdr.ethernet.dst_addr"
+    bitwidth: 48
+    match_type: TERNARY
+  }
+  action_refs {
+    id: 16812802
+  }
+  action_refs {
+    id: 16841371
+  }
+  action_refs {
+    id: 16796182
+    annotations: "@defaultonly"
+    scope: DEFAULT_ONLY
+  }
+  const_default_action_id: 16796182
+  size: 1024
+}
+actions {
+  preamble {
+    id: 16796182
+    name: "IngressPipeImpl.drop"
+    alias: "drop"
+  }
+}
+actions {
+  preamble {
+    id: 16812802
+    name: "IngressPipeImpl.set_egress_port"
+    alias: "set_egress_port"
+  }
+  params {
+    id: 1
+    name: "port_num"
+    bitwidth: 9
+  }
+}
+actions {
+  preamble {
+    id: 16841371
+    name: "IngressPipeImpl.set_multicast_group"
+    alias: "set_multicast_group"
+  }
+  params {
+    id: 1
+    name: "gid"
+    bitwidth: 16
+  }
+}
+type_info {
+}
diff --git a/src/tests/netx22-p4/run_test_01_bootstrap.sh b/src/tests/netx22-p4/run_test_01_bootstrap.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a3aeaa2b624bf28a06d379247e97211915522746
--- /dev/null
+++ b/src/tests/netx22-p4/run_test_01_bootstrap.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make sure to source the following scripts:
+# - my_deploy.sh
+# - tfs_runtime_env_vars.sh
+
+source tfs_runtime_env_vars.sh
+python -m pytest --verbose src/tests/netx22-p4/tests/test_functional_bootstrap.py
diff --git a/src/tests/netx22-p4/run_test_02_create_service.sh b/src/tests/netx22-p4/run_test_02_create_service.sh
new file mode 100755
index 0000000000000000000000000000000000000000..eb2b2d1ab2861bbcfc1d1fcd091ffc784945ceae
--- /dev/null
+++ b/src/tests/netx22-p4/run_test_02_create_service.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars.sh
+python -m pytest --verbose src/tests/netx22-p4/tests/test_functional_create_service.py
diff --git a/src/tests/netx22-p4/run_test_03_delete_service.sh b/src/tests/netx22-p4/run_test_03_delete_service.sh
new file mode 100755
index 0000000000000000000000000000000000000000..918073c84ab0a1b42c832fb2bf048eabb0aeabc2
--- /dev/null
+++ b/src/tests/netx22-p4/run_test_03_delete_service.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars.sh
+python -m pytest --verbose src/tests/netx22-p4/tests/test_functional_delete_service.py
diff --git a/src/tests/netx22-p4/run_test_04_cleanup.sh b/src/tests/netx22-p4/run_test_04_cleanup.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9e70d02ad09fcdd5c3f7a77b3f0361f366c7f989
--- /dev/null
+++ b/src/tests/netx22-p4/run_test_04_cleanup.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars.sh
+python -m pytest --verbose src/tests/netx22-p4/tests/test_functional_cleanup.py
diff --git a/src/tests/netx22-p4/setup.sh b/src/tests/netx22-p4/setup.sh
new file mode 100755
index 0000000000000000000000000000000000000000..07fe22e6aea2341c50462010b4bfb55c4a657a47
--- /dev/null
+++ b/src/tests/netx22-p4/setup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
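+# Copy the P4 pipeline artifacts (p4info.txt and bmv2.json) into the TFS device
+# pod; Objects.py expects to find them under /root/p4 on that pod.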
+export POD_NAME=$(kubectl get pods -n=tfs | grep device | awk '{print $1}')
+
+kubectl exec ${POD_NAME} -n=tfs -- mkdir -p /root/p4
+
+kubectl cp src/tests/netx22-p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4
+kubectl cp src/tests/netx22-p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4
diff --git a/src/tests/netx22-p4/tests/.gitignore b/src/tests/netx22-p4/tests/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..76cb708d1b532c9b69166e55f36bcb912fd5e370
--- /dev/null
+++ b/src/tests/netx22-p4/tests/.gitignore
@@ -0,0 +1,2 @@
+# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc.
+Credentials.py
diff --git a/src/tests/netx22-p4/tests/BuildDescriptors.py b/src/tests/netx22-p4/tests/BuildDescriptors.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c5419190487eb5089e4a30f523dca43fa3870f2
--- /dev/null
+++ b/src/tests/netx22-p4/tests/BuildDescriptors.py
@@ -0,0 +1,35 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, json, sys
+from .Objects import CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+
+def main():
+    with open('tests/netx22-p4/descriptors_emulated.json', 'w', encoding='UTF-8') as f:
+        devices = []
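+        # Merge each device's connection rules into its config rules before
+        # serializing the combined descriptor.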
+        for device, connect_rules, _, _ in DEVICES:
+            device = copy.deepcopy(device)
+            device['device_config']['config_rules'].extend(connect_rules)
+            devices.append(device)
+
+        f.write(json.dumps({
+            'contexts': CONTEXTS,
+            'topologies': TOPOLOGIES,
+            'devices': devices,
+            'links': LINKS
+        }))
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/tests/netx22-p4/tests/LoadDescriptors.py b/src/tests/netx22-p4/tests/LoadDescriptors.py
new file mode 100644
index 0000000000000000000000000000000000000000..33bc699af933601e4c6d4b8dbc7b0c51206241ef
--- /dev/null
+++ b/src/tests/netx22-p4/tests/LoadDescriptors.py
@@ -0,0 +1,40 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, sys
+from common.Settings import get_setting
+from context.client.ContextClient import ContextClient
+from common.proto.context_pb2 import Context, Device, Link, Topology
+from device.client.DeviceClient import DeviceClient
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+def main():
+    context_client = ContextClient(
+        get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+    device_client  = DeviceClient(
+        get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
+
+    with open('tests/netx22-p4/descriptors.json', 'r', encoding='UTF-8') as f:
+        descriptors = json.loads(f.read())
+
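+    # Load descriptors in dependency order: contexts and topologies first, then devices, then links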
+    for context  in descriptors['contexts'  ]: context_client.SetContext (Context (**context ))
+    for topology in descriptors['topologies']: context_client.SetTopology(Topology(**topology))
+    for device   in descriptors['devices'   ]: device_client .AddDevice  (Device  (**device  ))
+    for link     in descriptors['links'     ]: context_client.SetLink    (Link    (**link    ))
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/tests/netx22-p4/tests/Objects.py b/src/tests/netx22-p4/tests/Objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..09b3aced843a198b7c963a34492a4fe2379c9123
--- /dev/null
+++ b/src/tests/netx22-p4/tests/Objects.py
@@ -0,0 +1,276 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from typing import Dict, List, Tuple
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
+from common.tools.object_factory.Context import json_context, json_context_id
+from common.tools.object_factory.Device import (
+    json_device_connect_rules, json_device_id, json_device_p4_disabled)
+from common.tools.object_factory.ConfigRule import (
+    json_config_rule_set, json_config_rule_delete)
+from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
+from common.tools.object_factory.Link import json_link, json_link_id
+from common.tools.object_factory.Topology import json_topology, json_topology_id
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+
+# ----- Context --------------------------------------------------------------------------------------------------------
+CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
+CONTEXT    = json_context(DEFAULT_CONTEXT_UUID)
+
+# ----- Topology -------------------------------------------------------------------------------------------------------
+TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+TOPOLOGY    = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID)
+
+# ----- Monitoring Samples ---------------------------------------------------------------------------------------------
+PACKET_PORT_SAMPLE_TYPES = [
+    KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED,
+    KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED,
+    KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED,
+    KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED,
+]
+
+
+# ----- Devices --------------------------------------------------------------------------------------------------------
+
+CUR_PATH = os.path.dirname(os.path.abspath(__file__))
+
+DEVICE_SW1_UUID             = 'SW1'
+DEVICE_SW1_TIMEOUT          = 60
+DEVICE_SW1_ID               = json_device_id(DEVICE_SW1_UUID)
+DEVICE_SW1                  = json_device_p4_disabled(DEVICE_SW1_UUID)
+
+DEVICE_SW1_DPID             = 1
+DEVICE_SW1_NAME             = DEVICE_SW1_UUID
+DEVICE_SW1_IP_ADDR          = '10.0.2.10'
+DEVICE_SW1_PORT             = '50001'
+DEVICE_SW1_VENDOR           = 'Open Networking Foundation'
+DEVICE_SW1_HW_VER           = 'BMv2 simple_switch'
+DEVICE_SW1_SW_VER           = 'Stratum'
+
+DEVICE_SW1_BIN_PATH         = '/root/p4/bmv2.json'
+DEVICE_SW1_INFO_PATH        = '/root/p4/p4info.txt'
+
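+# Connection rules for SW1: the management address/port of the switch plus
+# P4-specific settings (datapath id, timeout, and the pipeline artifacts copied by setup.sh).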
+DEVICE_SW1_CONNECT_RULES    = json_device_connect_rules(
+    DEVICE_SW1_IP_ADDR,
+    DEVICE_SW1_PORT,
+    {
+        'id':       DEVICE_SW1_DPID,
+        'name':     DEVICE_SW1_NAME,
+        'vendor':   DEVICE_SW1_VENDOR,
+        'hw_ver':   DEVICE_SW1_HW_VER,
+        'sw_ver':   DEVICE_SW1_SW_VER,
+        'timeout':  DEVICE_SW1_TIMEOUT,
+        'p4bin':    DEVICE_SW1_BIN_PATH,
+        'p4info':   DEVICE_SW1_INFO_PATH
+    }
+)
+
+
+################################## TABLE ENTRIES ##################################
+
+
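+# Exact-match L2 forwarding entries: frames destined to aa:bb:cc:dd:ee:11 egress
+# through port 1, and frames destined to aa:bb:cc:dd:ee:22 through port 2.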
+DEVICE_SW1_CONFIG_TABLE_ENTRIES = [
+    json_config_rule_set(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.l2_exact_table',
+            'match-fields': [
+                {
+                    'match-field': 'hdr.ethernet.dst_addr',
+                    'match-value': 'aa:bb:cc:dd:ee:11'
+                }
+            ],
+            'action-name': 'IngressPipeImpl.set_egress_port',
+            'action-params': [
+                {
+                    'action-param': 'port_num',
+                    'action-value': '1'
+                }
+            ]
+        }
+    ),
+    json_config_rule_set(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.l2_exact_table',
+            'match-fields': [
+                {
+                    'match-field': 'hdr.ethernet.dst_addr',
+                    'match-value': 'aa:bb:cc:dd:ee:22'
+                }
+            ],
+            'action-name': 'IngressPipeImpl.set_egress_port',
+            'action-params': [
+                {
+                    'action-param': 'port_num',
+                    'action-value': '2'
+                }
+            ]
+        }
+    )
+]
+
+
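+# Alternative (kept commented out): equivalent entries for the ternary table,
+# which additionally require an explicit entry priority.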
+"""
+DEVICE_SW1_CONFIG_TABLE_ENTRIES = [
+    json_config_rule_set(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.l2_ternary_table',
+            'match-fields': [
+                {
+                    'match-field': 'hdr.ethernet.dst_addr',
+                    'match-value': 'aa:bb:cc:dd:ee:11 &&& ff:ff:ff:ff:ff:ff'
+                }
+            ],
+            'action-name': 'IngressPipeImpl.set_egress_port',
+            'action-params': [
+                {
+                    'action-param': 'port_num',
+                    'action-value': '1'
+                }
+            ],
+            'priority': 1
+        }
+    ),
+    json_config_rule_set(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.l2_ternary_table',
+            'match-fields': [
+                {
+                    'match-field': 'hdr.ethernet.dst_addr',
+                    'match-value': 'aa:bb:cc:dd:ee:22 &&& ff:ff:ff:ff:ff:ff'
+                }
+            ],
+            'action-name': 'IngressPipeImpl.set_egress_port',
+            'action-params': [
+                {
+                    'action-param': 'port_num',
+                    'action-value': '2'
+                }
+            ],
+            'priority': 1
+        }
+    ),
+]
+"""
+
+################################## TABLE DECONF ##################################
+
+
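+# Delete rules mirroring the entries installed above, used to remove them from the switch.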
+DEVICE_SW1_DECONF_TABLE_ENTRIES = [
+    json_config_rule_delete(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.l2_exact_table',
+            'match-fields': [
+                {
+                    'match-field': 'hdr.ethernet.dst_addr',
+                    'match-value': 'aa:bb:cc:dd:ee:11'
+                }
+            ],
+            'action-name': 'IngressPipeImpl.set_egress_port',
+            'action-params': [
+                {
+                    'action-param': 'port_num',
+                    'action-value': '1'
+                }
+            ]
+        }
+    ),
+    json_config_rule_delete(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.l2_exact_table',
+            'match-fields': [
+                {
+                    'match-field': 'hdr.ethernet.dst_addr',
+                    'match-value': 'aa:bb:cc:dd:ee:22'
+                }
+            ],
+            'action-name': 'IngressPipeImpl.set_egress_port',
+            'action-params': [
+                {
+                    'action-param': 'port_num',
+                    'action-value': '2'
+                }
+            ]
+        }
+    )
+]
+
+
+
+"""
+DEVICE_SW1_DECONF_TABLE_ENTRIES = [
+    json_config_rule_delete(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.l2_ternary_table',
+            'match-fields': [
+                {
+                    'match-field': 'hdr.ethernet.dst_addr',
+                    'match-value': 'aa:bb:cc:dd:ee:11 &&& ff:ff:ff:ff:ff:ff'
+                }
+            ],
+            'action-name': 'IngressPipeImpl.set_egress_port',
+            'action-params': [
+                {
+                    'action-param': 'port_num',
+                    'action-value': '1'
+                }
+            ],
+            'priority': 1
+        }
+    ),
+    json_config_rule_delete(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.l2_ternary_table',
+            'match-fields': [
+                {
+                    'match-field': 'hdr.ethernet.dst_addr',
+                    'match-value': 'aa:bb:cc:dd:ee:22 &&& ff:ff:ff:ff:ff:ff'
+                }
+            ],
+            'action-name': 'IngressPipeImpl.set_egress_port',
+            'action-params': [
+                {
+                    'action-param': 'port_num',
+                    'action-value': '2'
+                }
+            ],
+            'priority': 1
+        }
+    ),
+]
+"""
+
+# ----- Links ----------------------------------------------------------------------------------------------------------
+
+# ----- WIM Service Settings -------------------------------------------------------------------------------------------
+
+# ----- Object Collections ---------------------------------------------------------------------------------------------
+
+CONTEXTS = [CONTEXT]
+TOPOLOGIES = [TOPOLOGY]
+
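+# Each DEVICES entry is a tuple: (device descriptor, connect rules, table config rules, table deconfig rules)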
+DEVICES = [
+    (DEVICE_SW1, DEVICE_SW1_CONNECT_RULES, DEVICE_SW1_CONFIG_TABLE_ENTRIES, DEVICE_SW1_DECONF_TABLE_ENTRIES),
+]
+
+LINKS = []
diff --git a/src/tests/netx22-p4/tests/__init__.py b/src/tests/netx22-p4/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7
--- /dev/null
+++ b/src/tests/netx22-p4/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/netx22-p4/tests/test_functional_bootstrap.py b/src/tests/netx22-p4/tests/test_functional_bootstrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..0254ffd2602bd8dfc0766db0b9e766f7e7e79b32
--- /dev/null
+++ b/src/tests/netx22-p4/tests/test_functional_bootstrap.py
@@ -0,0 +1,95 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging, pytest
+from common.Settings import get_setting
+from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Link import json_link_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from common.proto.context_pb2 import ConfigActionEnum, Context, ContextId, Device, Empty, Link, Topology, DeviceOperationalStatusEnum
+from device.client.DeviceClient import DeviceClient
+from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+def test_prepare_scenario(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+
+    # ----- Create Contexts and Topologies -----------------------------------------------------------------------------
+    for context in CONTEXTS:
+        context_uuid = context['context_id']['context_uuid']['uuid']
+        LOGGER.info('Adding Context {:s}'.format(context_uuid))
+        response = context_client.SetContext(Context(**context))
+        assert response.context_uuid.uuid == context_uuid
+
+    for topology in TOPOLOGIES:
+        context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid']
+        topology_uuid = topology['topology_id']['topology_uuid']['uuid']
+        LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
+        response = context_client.SetTopology(Topology(**topology))
+        assert response.context_id.context_uuid.uuid == context_uuid
+        assert response.topology_uuid.uuid == topology_uuid
+        context_id = json_context_id(context_uuid)
+
+def test_scenario_ready(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure scenario is ready -------------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == len(CONTEXTS)
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == len(TOPOLOGIES)
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == 0
+
+def test_devices_bootstrapping(
+    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
+
+    # ----- Create Devices ---------------------------------------------------------------
+    for device, connect_rules, config_rules, _ in DEVICES:
+        device_uuid = device['device_id']['device_uuid']['uuid']
+        LOGGER.info('Adding Device {:s}'.format(device_uuid))
+
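+        # Work on a deep copy so the shared DEVICES template is not mutated across tests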
+        device_p4_with_connect_rules = copy.deepcopy(device)
+        device_p4_with_connect_rules['device_config']['config_rules'].extend(connect_rules)
+        response = device_client.AddDevice(Device(**device_p4_with_connect_rules))
+        assert response.device_uuid.uuid == device_uuid
+
+def test_devices_bootstrapped(context_client : ContextClient):  # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure devices are created -----------------------------------------------------------------
+    response = context_client.ListContexts(Empty())
+    assert len(response.contexts) == len(CONTEXTS)
+
+    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == len(TOPOLOGIES)
+
+    response = context_client.ListDevices(Empty())
+    assert len(response.devices) == len(DEVICES)
diff --git a/src/tests/netx22-p4/tests/test_functional_cleanup.py b/src/tests/netx22-p4/tests/test_functional_cleanup.py
new file mode 100644
index 0000000000000000000000000000000000000000..32f716f1c2287b11bae3610022d64659d82ba73d
--- /dev/null
+++ b/src/tests/netx22-p4/tests/test_functional_cleanup.py
@@ -0,0 +1,76 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging, pytest
+from common.Settings import get_setting
+from common.tests.EventTools import EVENT_REMOVE, check_events
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Link import json_link_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from common.proto.context_pb2 import ConfigActionEnum, ContextId, Device, DeviceId, Empty, LinkId, TopologyId, DeviceOperationalStatusEnum
+from device.client.DeviceClient import DeviceClient
+from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
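+# Tear down the scenario in reverse order of creation: devices first, then topologies, then contexts.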
+def test_scenario_cleanup(
+    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
+
+    # ----- Delete Devices and Validate Collected Events ---------------------------------------------------------------
+    for device, _, _, _ in DEVICES:
+
+        device_id = device['device_id']
+        device_uuid = device_id['device_uuid']['uuid']
+        LOGGER.info('Deleting Device {:s}'.format(device_uuid))
+        device_client.DeleteDevice(DeviceId(**device_id))
+        #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid)))
+
+        response = context_client.ListDevices(Empty())
+        assert len(response.devices) == 0
+
+    # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------
+    for topology in TOPOLOGIES:
+        topology_id = topology['topology_id']
+        context_uuid = topology_id['context_id']['context_uuid']['uuid']
+        topology_uuid = topology_id['topology_uuid']['uuid']
+        LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
+        context_client.RemoveTopology(TopologyId(**topology_id))
+        context_id = json_context_id(context_uuid)
+        #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id)))
+
+    # ----- Delete Contexts and Validate Collected Events --------------------------------------------------------------
+    for context in CONTEXTS:
+        context_id = context['context_id']
+        context_uuid = context_id['context_uuid']['uuid']
+        LOGGER.info('Deleting Context {:s}'.format(context_uuid))
+        context_client.RemoveContext(ContextId(**context_id))
+        #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid)))
diff --git a/src/tests/netx22-p4/tests/test_functional_create_service.py b/src/tests/netx22-p4/tests/test_functional_create_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..501536bdb6299091cf791438f60f7e48cb0b0626
--- /dev/null
+++ b/src/tests/netx22-p4/tests/test_functional_create_service.py
@@ -0,0 +1,62 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging, pytest
+from common.Settings import get_setting
+from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Link import json_link_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology
+from device.client.DeviceClient import DeviceClient
+from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceId,\
+    DeviceOperationalStatusEnum
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
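+# This test first enables the P4 device and then installs the L2 forwarding
+# table entries defined in Objects.py.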
+def test_rules_entry(
+    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
+
+    # ----- Create Devices ---------------------------------------------------------------
+    for device, connect_rules, config_rules, _ in DEVICES:
+        # Enable device
+        device_p4_with_operational_status = copy.deepcopy(device)
+        device_p4_with_operational_status['device_operational_status'] = \
+            DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+        device_client.ConfigureDevice(Device(**device_p4_with_operational_status))
+
+        # Retrieve the updated device from the Context component
+        device_data = context_client.GetDevice(DeviceId(**device['device_id']))
+
+        # Insert table entries
+        device_p4_with_config_rules = copy.deepcopy(device)
+        device_p4_with_config_rules['device_config']['config_rules'].extend(config_rules)
+        device_client.ConfigureDevice(Device(**device_p4_with_config_rules))
diff --git a/src/tests/netx22-p4/tests/test_functional_delete_service.py b/src/tests/netx22-p4/tests/test_functional_delete_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..1be7e324080befe8c7e4719f364201ef16772fc8
--- /dev/null
+++ b/src/tests/netx22-p4/tests/test_functional_delete_service.py
@@ -0,0 +1,58 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging, pytest
+from common.Settings import get_setting
+from common.tests.EventTools import EVENT_REMOVE, check_events
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Link import json_link_id
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from common.proto.context_pb2 import ConfigActionEnum, ContextId, Device, DeviceId, Empty, LinkId, TopologyId, DeviceOperationalStatusEnum
+from device.client.DeviceClient import DeviceClient
+from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC'))
+    yield _client
+    _client.close()
+
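+# This test removes the previously installed table entries and then sets the
+# P4 device operationally disabled.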
+def test_rules_delete(
+    context_client : ContextClient, device_client : DeviceClient):  # pylint: disable=redefined-outer-name
+
+    # ----- Delete Devices and Validate Collected Events ---------------------------------------------------------------
+    for device, _, _, deconf_rules in DEVICES:
+
+        device_p4_with_deconf_rules = copy.deepcopy(device)
+        device_p4_with_deconf_rules['device_config']['config_rules'].extend(deconf_rules)
+        device_client.ConfigureDevice(Device(**device_p4_with_deconf_rules))
+
+        device_p4_with_operational_status = copy.deepcopy(device)
+        device_p4_with_operational_status['device_operational_status'] = \
+            DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
+        device_client.ConfigureDevice(Device(**device_p4_with_operational_status))