diff --git a/scripts/run_tests_locally-device-gnmi-openconfig.sh b/scripts/run_tests_locally-device-gnmi-openconfig.sh
new file mode 100755
index 0000000000000000000000000000000000000000..92e8448126f53a7f34b97c480585fea25f2b0411
--- /dev/null
+++ b/scripts/run_tests_locally-device-gnmi-openconfig.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unitary tests and analyze coverage of code at the same time
+# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \
+    device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig.py
diff --git a/scripts/run_tests_locally-device-openconfig-arista-l2vpn.sh b/scripts/run_tests_locally-device-openconfig-arista-l2vpn.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9e151dd3e4c482d90342af91de0cbb018fb1bf78
--- /dev/null
+++ b/scripts/run_tests_locally-device-openconfig-arista-l2vpn.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+PROJECTDIR=`pwd`
+
+cd $PROJECTDIR/src
+RCFILE=$PROJECTDIR/coverage/.coveragerc
+
+# Run unitary tests and analyze coverage of code at the same time
+# helpful pytest flags: --log-level=INFO -o log_cli=true --verbose --maxfail=1 --durations=0
+coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO -o log_cli=true --verbose \
+    device/tests/test_unitary_openconfig_arista_l2vpn.py
diff --git a/src/common/tools/object_factory/EndPoint.py b/src/common/tools/object_factory/EndPoint.py
index 85a5d4494184567447d6d16fa7df2c530106c6ba..faf1accd37ebdc32b382a1cca16ccbeb510099f9 100644
--- a/src/common/tools/object_factory/EndPoint.py
+++ b/src/common/tools/object_factory/EndPoint.py
@@ -43,13 +43,14 @@ def json_endpoint_ids(
 
 def json_endpoint(
         device_id : Dict, endpoint_uuid : str, endpoint_type : str, topology_id : Optional[Dict] = None,
-        kpi_sample_types : List[int] = [], location : Optional[Dict] = None
+        name : Optional[str] = None, kpi_sample_types : List[int] = [], location : Optional[Dict] = None
     ):
 
     result = {
         'endpoint_id': json_endpoint_id(device_id, endpoint_uuid, topology_id=topology_id),
         'endpoint_type': endpoint_type,
     }
+    if name is not None: result['name'] = name
     if kpi_sample_types is not None and len(kpi_sample_types) > 0:
         result['kpi_sample_types'] = copy.deepcopy(kpi_sample_types)
     if location is not None:
diff --git a/src/device/Dockerfile b/src/device/Dockerfile
index 42bf4335cc6b0c6337c166dae8680e18d46d1360..31dffddc40a6c9dc3dca8f0f155ac543fd10b12d 100644
--- a/src/device/Dockerfile
+++ b/src/device/Dockerfile
@@ -16,9 +16,24 @@ FROM python:3.9-slim
 
 # Install dependencies
 RUN apt-get --yes --quiet --quiet update && \
-    apt-get --yes --quiet --quiet install wget g++ git && \
+    apt-get --yes --quiet --quiet install wget g++ git build-essential cmake libpcre2-dev python3-dev python3-cffi && \
     rm -rf /var/lib/apt/lists/*
 
+# Download, build and install libyang. Note that APT package is outdated
+# - Ref: https://github.com/CESNET/libyang
+# - Ref: https://github.com/CESNET/libyang-python/
+RUN mkdir -p /var/libyang
+RUN git clone https://github.com/CESNET/libyang.git /var/libyang
+WORKDIR /var/libyang
+RUN git fetch
+RUN git checkout v2.1.148
+RUN mkdir -p /var/libyang/build
+WORKDIR /var/libyang/build
+RUN cmake -D CMAKE_BUILD_TYPE:String="Release" ..
+RUN make
+RUN make install
+RUN ldconfig
+
 # Set Python to show logs as they occur
 ENV PYTHONUNBUFFERED=0
 
@@ -62,17 +77,30 @@ RUN python3 -m pip install -r requirements.txt
 
 # Add component files into working directory
 WORKDIR /var/teraflow
+COPY src/device/. device/
 COPY src/context/__init__.py context/__init__.py
 COPY src/context/client/. context/client/
 COPY src/monitoring/__init__.py monitoring/__init__.py
 COPY src/monitoring/client/. monitoring/client/
-COPY src/device/. device/
 
+# Copy test mock tools
 RUN mkdir -p tests/tools/mock_ietf_actn_sdn_ctrl
 RUN touch tests/__init__.py
 RUN touch tests/tools/__init__.py
 RUN touch tests/tools/mock_ietf_actn_sdn_ctrl/__init__.py
 COPY src/tests/tools/mock_ietf_actn_sdn_ctrl/. tests/tools/mock_ietf_actn_sdn_ctrl/
 
+# Clone OpenConfig YANG models
+RUN mkdir -p /var/teraflow/device/service/drivers/gnmi_openconfig/git/openconfig/public
+RUN mkdir -p /tmp/openconfig
+RUN git clone https://github.com/openconfig/public.git /tmp/openconfig
+WORKDIR /tmp/openconfig
+RUN git fetch
+RUN git checkout v4.4.0
+RUN mv /tmp/openconfig/release /var/teraflow/device/service/drivers/gnmi_openconfig/git/openconfig/public
+RUN mv /tmp/openconfig/third_party /var/teraflow/device/service/drivers/gnmi_openconfig/git/openconfig/public
+RUN rm -rf /tmp/openconfig
+WORKDIR /var/teraflow
+
 # Start the service
 ENTRYPOINT ["python", "-m", "device.service"]
diff --git a/src/device/requirements.in b/src/device/requirements.in
index bf5e6a2b3128f438a7c044c3f3cf9ee393de2265..a4d818b52b494f2ef6cc0938d69ddb133ab40859 100644
--- a/src/device/requirements.in
+++ b/src/device/requirements.in
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 anytree==2.8.0
 APScheduler==3.10.1
+bitarray==2.8.*
 cryptography==36.0.2
 deepdiff==6.7.*
 deepmerge==1.1.*
@@ -22,26 +22,27 @@ deepmerge==1.1.*
 Flask==2.1.3
 Flask-HTTPAuth==4.5.0
 Flask-RESTful==0.3.9
+ipaddress
 Jinja2==3.0.3
-numpy<2.0.0
+libyang==2.8.0
+macaddress
 ncclient==0.6.15
+numpy<2.0.0
 p4runtime==1.3.0
 pandas==1.5.*
 paramiko==2.9.2
+pyang==2.6.*
+git+https://github.com/robshakir/pyangbind.git
 python-json-logger==2.0.2
 #pytz==2021.3
 #redis==4.1.2
 requests==2.27.1
 requests-mock==1.9.3
-xmltodict==0.12.0
 tabulate
-ipaddress
-macaddress
-yattag
-pyang==2.6.0
-git+https://github.com/robshakir/pyangbind.git
 websockets==10.4
 werkzeug==2.3.7
+xmltodict==0.12.0
+yattag
 
 # pip's dependency resolver does not take into account installed packages.
 # p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install latest one
diff --git a/src/device/service/drivers/gnmi_openconfig/DeltaSampleCache.py b/src/device/service/drivers/gnmi_openconfig/DeltaSampleCache.py
index daf04be5a1ff82a79031d8c3ffe19da10739fbcb..140efe84038ed5117d6c7924b188a7dc7dbd7958 100644
--- a/src/device/service/drivers/gnmi_openconfig/DeltaSampleCache.py
+++ b/src/device/service/drivers/gnmi_openconfig/DeltaSampleCache.py
@@ -13,13 +13,15 @@
 # limitations under the License.
 
 import copy
-from typing import Any, Dict, Tuple, Union
+from typing import Any, Dict, Optional, Tuple, Union
 
 class DeltaSampleCache:
     def __init__(self) -> None:
         self._previous_samples : Dict[str, Tuple[float, Union[int, float]]] = dict()
 
-    def get_delta(self, path : str, current_timestamp : float, current_value : Any) -> None:
+    def get_delta(
+        self, path : str, current_timestamp : float, current_value : Any
+    ) -> Optional[Tuple[float, Optional[Any]]]:
         previous_sample = copy.deepcopy(self._previous_samples.get(path))
         self._previous_samples[path] = current_timestamp, current_value
 
@@ -30,6 +32,10 @@ class DeltaSampleCache:
 
         delta_value = max(0, current_value - previous_value)
         delay = current_timestamp - previous_timestamp
-        delta_sample = current_timestamp, delta_value / delay
-
-        return delta_sample
+        if delay < 1.e-12:
+            # return a special value meaning, at that timestamp,
+            # computed value is not a number, e.g., division by zero
+            # also, restore the previous sample so we do not miss any packets/bytes
+            self._previous_samples[path] = previous_sample
+            return current_timestamp, None
+        return current_timestamp, delta_value / delay
diff --git a/src/device/service/drivers/gnmi_openconfig/GnmiSessionHandler.py b/src/device/service/drivers/gnmi_openconfig/GnmiSessionHandler.py
index 4428fb81ca304d20ee4b3bce259924278400249f..3668c6a3eeb4ab31e2ee01517efaab157c98f8d1 100644
--- a/src/device/service/drivers/gnmi_openconfig/GnmiSessionHandler.py
+++ b/src/device/service/drivers/gnmi_openconfig/GnmiSessionHandler.py
@@ -19,7 +19,8 @@ from common.type_checkers.Checkers import chk_float, chk_length, chk_string, chk
 from .gnmi.gnmi_pb2_grpc import gNMIStub
 from .gnmi.gnmi_pb2 import Encoding, GetRequest, SetRequest, UpdateResult   # pylint: disable=no-name-in-module
 from .handlers import ALL_RESOURCE_KEYS, compose, get_path, parse
-from .tools.Capabilities import get_supported_encodings
+from .handlers.YangHandler import YangHandler
+from .tools.Capabilities import check_capabilities
 from .tools.Channel import get_grpc_channel
 from .tools.Path import path_from_string, path_to_string #, compose_path
 from .tools.Subscriptions import Subscriptions
@@ -39,12 +40,22 @@ class GnmiSessionHandler:
         self._use_tls   = settings.get('use_tls', False)
         self._channel : Optional[grpc.Channel] = None
         self._stub : Optional[gNMIStub] = None
+        self._yang_handler = None
         self._monit_thread = None
-        self._supported_encodings = None
+        self._yang_handler = YangHandler()
         self._subscriptions = Subscriptions()
         self._in_subscriptions = queue.Queue()
         self._out_samples = queue.Queue()
 
+    def __del__(self) -> None:
+        self._logger.info('Destroying YangValidator...')
+        if self._yang_handler is not None:
+            self._logger.debug('yang_validator.data:')
+            for path, dnode in self._yang_handler.get_data_paths().items():
+                self._logger.debug('  {:s}: {:s}'.format(str(path), json.dumps(dnode.print_dict())))
+            self._yang_handler.destroy()
+        self._logger.info('DONE')
+
     @property
     def subscriptions(self): return self._subscriptions
 
@@ -58,8 +69,7 @@ class GnmiSessionHandler:
         with self._lock:
             self._channel = get_grpc_channel(self._address, self._port, self._use_tls, self._logger)
             self._stub = gNMIStub(self._channel)
-            self._supported_encodings = get_supported_encodings(
-                self._stub, self._username, self._password, timeout=120)
+            check_capabilities(self._stub, self._username, self._password, timeout=120)
             self._monit_thread = MonitoringThread(
                 self._stub, self._logger, self._settings, self._in_subscriptions, self._out_samples)
             self._monit_thread.start()
@@ -96,13 +106,15 @@ class GnmiSessionHandler:
                 self._logger.exception(MSG.format(str_resource_name, str(resource_key)))
                 parsing_results.append((resource_key, e)) # if validation fails, store the exception
 
+        self._logger.debug('parsing_results={:s}'.format(str(parsing_results)))
+
         if len(parsing_results) > 0:
             return parsing_results
 
         metadata = [('username', self._username), ('password', self._password)]
         timeout = None # GNMI_SUBSCRIPTION_TIMEOUT = int(sampling_duration)
         get_reply = self._stub.Get(get_request, metadata=metadata, timeout=timeout)
-        #self._logger.info('get_reply={:s}'.format(grpc_message_to_json_string(get_reply)))
+        self._logger.debug('get_reply={:s}'.format(grpc_message_to_json_string(get_reply)))
 
         results = []
         #results[str_filter] = [i, None, False]  # (index, value, processed?)
@@ -119,7 +131,7 @@ class GnmiSessionHandler:
             #    resource_key_tuple[2] = True
 
             for update in notification.update:
-                #self._logger.info('update={:s}'.format(grpc_message_to_json_string(update)))
+                self._logger.debug('update={:s}'.format(grpc_message_to_json_string(update)))
                 str_path = path_to_string(update.path)
                 #resource_key_tuple = results.get(str_path)
                 #if resource_key_tuple is None:
@@ -130,10 +142,10 @@ class GnmiSessionHandler:
                     value = decode_value(update.val)
                     #resource_key_tuple[1] = value
                     #resource_key_tuple[2] = True
-                    results.extend(parse(str_path, value))
+                    results.extend(parse(str_path, value, self._yang_handler))
                 except Exception as e: # pylint: disable=broad-except
-                    MSG = 'Exception processing notification {:s}'
-                    self._logger.exception(MSG.format(grpc_message_to_json_string(notification)))
+                    MSG = 'Exception processing update {:s}'
+                    self._logger.exception(MSG.format(grpc_message_to_json_string(update)))
                     results.append((str_path, e)) # if validation fails, store the exception
 
         #_results = sorted(results.items(), key=lambda x: x[1][0])
@@ -158,31 +170,34 @@ class GnmiSessionHandler:
 
         set_request = SetRequest()
         #for resource_key in resource_keys:
+        resources_requested = list()
         for resource_key, resource_value in resources:
-            self._logger.info('---1')
-            self._logger.info(str(resource_key))
-            self._logger.info(str(resource_value))
+            #self._logger.info('---1')
+            #self._logger.info(str(resource_key))
+            #self._logger.info(str(resource_value))
             #resource_tuple = resource_tuples.get(resource_key)
             #if resource_tuple is None: continue
             #_, value, exists, operation_done = resource_tuple
             if isinstance(resource_value, str): resource_value = json.loads(resource_value)
-            str_path, str_data = compose(resource_key, resource_value, delete=False)
-            self._logger.info('---3')
-            self._logger.info(str(str_path))
-            self._logger.info(str(str_data))
+            str_path, str_data = compose(resource_key, resource_value, self._yang_handler, delete=False)
+            if str_path is None: continue # nothing to set
+            #self._logger.info('---3')
+            #self._logger.info(str(str_path))
+            #self._logger.info(str(str_data))
             set_request_list = set_request.update #if exists else set_request.replace
             set_request_entry = set_request_list.add()
             set_request_entry.path.CopyFrom(path_from_string(str_path))
             set_request_entry.val.json_val = str_data.encode('UTF-8')
+            resources_requested.append((resource_key, resource_value))
 
-        self._logger.info('set_request={:s}'.format(grpc_message_to_json_string(set_request)))
+        self._logger.debug('set_request={:s}'.format(grpc_message_to_json_string(set_request)))
         metadata = [('username', self._username), ('password', self._password)]
         timeout = None # GNMI_SUBSCRIPTION_TIMEOUT = int(sampling_duration)
         set_reply = self._stub.Set(set_request, metadata=metadata, timeout=timeout)
-        self._logger.info('set_reply={:s}'.format(grpc_message_to_json_string(set_reply)))
+        self._logger.debug('set_reply={:s}'.format(grpc_message_to_json_string(set_reply)))
 
         results = []
-        for (resource_key, resource_value), update_result in zip(resources, set_reply.response):
+        for (resource_key, resource_value), update_result in zip(resources_requested, set_reply.response):
             operation = update_result.op
             if operation == UpdateResult.UPDATE:
                 results.append((resource_key, True))
@@ -227,30 +242,34 @@ class GnmiSessionHandler:
 
         set_request = SetRequest()
         #for resource_key in resource_keys:
+        resources_requested = list()
         for resource_key, resource_value in resources:
-            self._logger.info('---1')
-            self._logger.info(str(resource_key))
-            self._logger.info(str(resource_value))
+            #self._logger.info('---1')
+            #self._logger.info(str(resource_key))
+            #self._logger.info(str(resource_value))
             #resource_tuple = resource_tuples.get(resource_key)
             #if resource_tuple is None: continue
             #_, value, exists, operation_done = resource_tuple
             #if not exists: continue
             if isinstance(resource_value, str): resource_value = json.loads(resource_value)
-            str_path, str_data = compose(resource_key, resource_value, delete=True)
-            self._logger.info('---3')
-            self._logger.info(str(str_path))
-            self._logger.info(str(str_data))
+            # pylint: disable=unused-variable
+            str_path, str_data = compose(resource_key, resource_value, self._yang_handler, delete=True)
+            if str_path is None: continue # nothing to do with this resource_key
+            #self._logger.info('---3')
+            #self._logger.info(str(str_path))
+            #self._logger.info(str(str_data))
             set_request_entry = set_request.delete.add()
             set_request_entry.CopyFrom(path_from_string(str_path))
+            resources_requested.append((resource_key, resource_value))
 
-        self._logger.info('set_request={:s}'.format(grpc_message_to_json_string(set_request)))
+        self._logger.debug('set_request={:s}'.format(grpc_message_to_json_string(set_request)))
         metadata = [('username', self._username), ('password', self._password)]
         timeout = None # GNMI_SUBSCRIPTION_TIMEOUT = int(sampling_duration)
         set_reply = self._stub.Set(set_request, metadata=metadata, timeout=timeout)
-        self._logger.info('set_reply={:s}'.format(grpc_message_to_json_string(set_reply)))
+        self._logger.debug('set_reply={:s}'.format(grpc_message_to_json_string(set_reply)))
 
         results = []
-        for (resource_key, resource_value), update_result in zip(resources, set_reply.response):
+        for (resource_key, resource_value), update_result in zip(resources_requested, set_reply.response):
             operation = update_result.op
             if operation == UpdateResult.DELETE:
                 results.append((resource_key, True))
diff --git a/src/device/service/drivers/gnmi_openconfig/MonitoringThread.py b/src/device/service/drivers/gnmi_openconfig/MonitoringThread.py
index 8bf6704a854542b3a085af05d55391e23c8d224f..505c2f009cc7a2ab312e062f1ad82bd01d4c183e 100644
--- a/src/device/service/drivers/gnmi_openconfig/MonitoringThread.py
+++ b/src/device/service/drivers/gnmi_openconfig/MonitoringThread.py
@@ -94,9 +94,14 @@ class MonitoringThread(threading.Thread):
         subscriptions = []
         while not self._terminate.is_set():
             try:
-                subscription = self._in_subscriptions.get(block=True, timeout=0.1)
+                # Some devices do not support to process multiple
+                # SubscriptionList requests in a bidirectional channel.
+                # Increased timeout to 5 seconds assuming it should
+                # give enough time to receive all the subscriptions in
+                # the queue and process them in bulk.
+                subscription = self._in_subscriptions.get(block=True, timeout=5.0)
                 operation, resource_key, sampling_duration, sampling_interval = subscription   # pylint: disable=unused-variable
-                if operation != 'subscribe': continue # Unsubscribe not supported by gNM, needs to cancel entire connection
+                if operation != 'subscribe': continue # Unsubscribe not supported by gNMI, needs to cancel entire connection
                 # options.timeout = int(sampling_duration)
                 #_path = parse_xpath(resource_key)
                 path = path_from_string(resource_key)
@@ -107,15 +112,15 @@ class MonitoringThread(threading.Thread):
                 subscriptions.append(subscription)
             except queue.Empty:
                 if len(subscriptions) == 0: continue
-                #self._logger.warning('[generate_requests] process')
+                self._logger.debug('[generate_requests] process')
                 prefix = path_from_string(GNMI_PATH_PREFIX) if GNMI_PATH_PREFIX is not None else None
                 qos = QOSMarking(marking=GNMI_QOS_MARKING) if GNMI_QOS_MARKING is not None else None
                 subscriptions_list = SubscriptionList(
                     prefix=prefix, mode=GNMI_SUBSCRIPTION_LIST_MODE, allow_aggregation=GNMI_ALLOW_AGGREGATION,
                     encoding=GNMI_ENCODING, subscription=subscriptions, qos=qos)
                 subscribe_request = SubscribeRequest(subscribe=subscriptions_list)
-                #str_subscribe_request = grpc_message_to_json_string(subscribe_request)
-                #self._logger.warning('[generate_requests] subscribe_request={:s}'.format(str_subscribe_request))
+                str_subscribe_request = grpc_message_to_json_string(subscribe_request)
+                self._logger.debug('[generate_requests] subscribe_request={:s}'.format(str_subscribe_request))
                 yield subscribe_request
                 subscriptions = []
             except: # pylint: disable=bare-except
@@ -134,7 +139,7 @@ class MonitoringThread(threading.Thread):
             self._response_iterator = self._stub.Subscribe(request_iterator, metadata=metadata, timeout=timeout)
             for subscribe_response in self._response_iterator:
                 str_subscribe_response = grpc_message_to_json_string(subscribe_response)
-                self._logger.warning('[run] subscribe_response={:s}'.format(str_subscribe_response))
+                self._logger.debug('[run] subscribe_response={:s}'.format(str_subscribe_response))
                 update = subscribe_response.update
                 timestamp_device = float(update.timestamp) / 1.e9
                 timestamp_local = datetime.timestamp(datetime.utcnow())
@@ -145,25 +150,37 @@ class MonitoringThread(threading.Thread):
                 else:
                     # might be clocks are not synchronized, use local timestamp
                     timestamp = timestamp_local
+                str_prefix = path_to_string(update.prefix) if len(update.prefix.elem) > 0 else ''
                 for update_entry in update.update:
                     str_path = path_to_string(update_entry.path)
+                    if len(str_prefix) > 0:
+                        str_path = '{:s}/{:s}'.format(str_prefix, str_path)
+                        str_path = str_path.replace('//', '/')
+                    if str_path.startswith('/interfaces/'):
+                        # Add namespace, if missing
+                        str_path_parts = str_path.split('/')
+                        str_path_parts[1] = 'openconfig-interfaces:interfaces'
+                        str_path = '/'.join(str_path_parts)
                     #if str_path != '/system/name/host-name': continue
                     #counter_name = update_entry.path[-1].name
                     value_type = update_entry.val.WhichOneof('value')
                     value = getattr(update_entry.val, value_type)
-                    if re.match(r'^[0-9]+$', value) is not None:
-                        value = int(value)
-                    elif re.match(r'^[0-9]*\.[0-9]*$', value) is not None:
-                        value = float(value)
-                    else:
-                        value = str(value)
+                    if isinstance(value, str):
+                        if re.match(r'^[0-9]+$', value) is not None:
+                            value = int(value)
+                        elif re.match(r'^[0-9]*\.[0-9]*$', value) is not None:
+                            value = float(value)
+                        else:
+                            value = str(value)
                     delta_sample = self._delta_sample_cache.get_delta(str_path, timestamp, value)
                     if delta_sample is None:
                         sample = (timestamp, str_path, value)
                     else:
                         sample = (delta_sample[0], str_path, delta_sample[1])
-                    self._logger.warning('[run] sample={:s}'.format(str(sample)))
-                    self._out_samples.put_nowait(sample)
+                    self._logger.debug('[run] sample={:s}'.format(str(sample)))
+                    if sample[2] is not None:
+                        # Skip not-a-number (e.g., division by zero) samples
+                        self._out_samples.put_nowait(sample)
         except grpc.RpcError as e:
             if e.code() != grpc.StatusCode.CANCELLED: raise                 # pylint: disable=no-member
             if e.details() != 'Locally cancelled by application!': raise    # pylint: disable=no-member
diff --git a/src/device/service/drivers/gnmi_openconfig/clone-yang-models.sh b/src/device/service/drivers/gnmi_openconfig/clone-yang-models.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b41fc6bbba60bf18c0cccebdb3536c3f96372c39
--- /dev/null
+++ b/src/device/service/drivers/gnmi_openconfig/clone-yang-models.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+BASE_PATH=~/tfs-ctrl/src/device/service/drivers/gnmi_openconfig
+GIT_BASE_PATH=${BASE_PATH}/git/openconfig
+
+rm -rf ${GIT_BASE_PATH}
+
+OC_PUBLIC_PATH=${GIT_BASE_PATH}/public
+mkdir -p ${OC_PUBLIC_PATH}
+git clone https://github.com/openconfig/public.git ${OC_PUBLIC_PATH}
+
+#OC_HERCULES_PATH=${GIT_BASE_PATH}/hercules
+#mkdir -p ${OC_HERCULES_PATH}
+#git clone https://github.com/openconfig/hercules.git ${OC_HERCULES_PATH}
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/Component.py b/src/device/service/drivers/gnmi_openconfig/handlers/Component.py
index 5ac8754c6081245f79f28b89e026d5a859bc363a..e669872fb5e675ea5490b5e36376ceced4e296ee 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/Component.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/Component.py
@@ -12,45 +12,52 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
+import json, logging, re # libyang
 from typing import Any, Dict, List, Tuple
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from ._Handler import _Handler
+from .YangHandler import YangHandler
 
 LOGGER = logging.getLogger(__name__)
 
-PATH_IF_CTR = "/interfaces/interface[name={:s}]/state/counters/{:s}"
+PATH_IF_CTR = '/openconfig-interfaces:interfaces/interface[name={:s}]/state/counters/{:s}'
 
+#pylint: disable=abstract-method
 class ComponentHandler(_Handler):
     def get_resource_key(self) -> str: return '/endpoints/endpoint'
-    def get_path(self) -> str: return '/components/component'
+    def get_path(self) -> str: return '/openconfig-platform:components'
 
-    def parse(self, json_data : Dict) -> List[Tuple[str, Dict[str, Any]]]:
-        #LOGGER.info('json_data = {:s}'.format(json.dumps(json_data)))
-        json_component_list : List[Dict] = json_data.get('component', [])
-        response = []
-        for json_component in json_component_list:
-            #LOGGER.info('json_component = {:s}'.format(json.dumps(json_component)))
+    def parse(
+        self, json_data : Dict, yang_handler : YangHandler
+    ) -> List[Tuple[str, Dict[str, Any]]]:
+        LOGGER.debug('json_data = {:s}'.format(json.dumps(json_data)))
 
-            endpoint = {}
+        yang_components_path = self.get_path()
+        json_data_valid = yang_handler.parse_to_dict(yang_components_path, json_data, fmt='json')
 
-            component_type = json_component.get('state', {}).get('type')
-            if component_type is None: continue
-            component_type = component_type.replace('oc-platform-types:', '')
-            component_type = component_type.replace('openconfig-platform-types:', '')
-            if component_type not in {'PORT'}: continue
-            endpoint['type'] = '-'
+        entries = []
+        for component in json_data_valid['components']['component']:
+            LOGGER.debug('component={:s}'.format(str(component)))
+
+            component_name = component['name']
+            #component_config = component.get('config', {})
 
-            #LOGGER.info('PORT json_component = {:s}'.format(json.dumps(json_component)))
+            #yang_components : libyang.DContainer = yang_handler.get_data_path(yang_components_path)
+            #yang_component_path = 'component[name="{:s}"]'.format(component_name)
+            #yang_component : libyang.DContainer = yang_components.create_path(yang_component_path)
+            #yang_component.merge_data_dict(component, strict=True, validate=False)
 
-            component_name = json_component.get('name')
-            if component_name is None: continue
+            component_state = component.get('state', {})
+            component_type = component_state.get('type')
+            if component_type is None: continue
+            component_type = component_type.split(':')[-1]
+            if component_type not in {'PORT'}: continue
 
             # TODO: improve mapping between interface name and component name
             # By now, computed by time for the sake of saving time for the Hackfest.
-            interface_name = component_name.lower().replace('-port', '')
+            interface_name = re.sub(r'\-[pP][oO][rR][tT]', '', component_name)
 
-            endpoint['uuid'] = interface_name
+            endpoint = {'uuid': interface_name, 'type': '-'}
             endpoint['sample_types'] = {
                 KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED     : PATH_IF_CTR.format(interface_name, 'in-octets' ),
                 KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED  : PATH_IF_CTR.format(interface_name, 'out-octets'),
@@ -58,6 +65,6 @@ class ComponentHandler(_Handler):
                 KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED: PATH_IF_CTR.format(interface_name, 'out-pkts'  ),
             }
 
-            if len(endpoint) == 0: continue
-            response.append(('/endpoints/endpoint[{:s}]'.format(endpoint['uuid']), endpoint))
-        return response
+            entries.append(('/endpoints/endpoint[{:s}]'.format(endpoint['uuid']), endpoint))
+
+        return entries
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py b/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py
index e97855aa8b97fc855b07848f1a7f0c7e93717a70..03cfc6ff15e0490aee99be89c3ff1e9327dddf15 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py
@@ -12,20 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging
+import json, libyang, logging
 from typing import Any, Dict, List, Tuple
 from ._Handler import _Handler
-from .Tools import dict_get_first
+from .Tools import get_bool, get_int, get_str
+from .YangHandler import YangHandler
 
 LOGGER = logging.getLogger(__name__)
 
 class InterfaceHandler(_Handler):
-    def get_resource_key(self) -> str: return '/interface'
-    def get_path(self) -> str: return '/interfaces/interface'
+    def get_resource_key(self) -> str: return '/interface/subinterface'
+    def get_path(self) -> str: return '/openconfig-interfaces:interfaces'
 
-    def compose(self, resource_key : str, resource_value : Dict, delete : bool = False) -> Tuple[str, str]:
-        if_name          = str (resource_value['name'                         ])    # ethernet-1/1
-        sif_index        = int (resource_value.get('sub_if_index'       , 0   ))    # 0
+    def compose(
+        self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False
+    ) -> Tuple[str, str]:
+        if_name   = get_str(resource_value, 'name'    )  # ethernet-1/1
+        sif_index = get_int(resource_value, 'index', 0)  # 0
 
         if delete:
             PATH_TMPL = '/interfaces/interface[name={:s}]/subinterfaces/subinterface[index={:d}]'
@@ -33,216 +36,159 @@ class InterfaceHandler(_Handler):
             str_data = json.dumps({})
             return str_path, str_data
 
-        if_enabled       = bool(resource_value.get('enabled'            , True))    # True/False
-        sif_enabled      = bool(resource_value.get('sub_if_enabled'     , True))    # True/False
-        sif_ipv4_enabled = bool(resource_value.get('sub_if_ipv4_enabled', True))    # True/False
-        sif_ipv4_address = str (resource_value['sub_if_ipv4_address'          ])    # 172.16.0.1
-        sif_ipv4_prefix  = int (resource_value['sub_if_ipv4_prefix'           ])    # 24
+        enabled        = get_bool(resource_value, 'enabled',  True) # True/False
+        #if_type        = get_str (resource_value, 'type'         ) # 'l3ipvlan'
+        vlan_id        = get_int (resource_value, 'vlan_id'       ) # 127
+        address_ip     = get_str (resource_value, 'address_ip'    ) # 172.16.0.1
+        address_prefix = get_int (resource_value, 'address_prefix') # 24
+        mtu            = get_int (resource_value, 'mtu'           ) # 1500
+
+        yang_ifs : libyang.DContainer = yang_handler.get_data_path('/openconfig-interfaces:interfaces')
+        yang_if_path = 'interface[name="{:s}"]'.format(if_name)
+        yang_if : libyang.DContainer = yang_ifs.create_path(yang_if_path)
+        yang_if.create_path('config/name',    if_name   )
+        if enabled is not None: yang_if.create_path('config/enabled', enabled)
+        if mtu     is not None: yang_if.create_path('config/mtu',     mtu)
+
+        yang_sifs : libyang.DContainer = yang_if.create_path('subinterfaces')
+        yang_sif_path = 'subinterface[index="{:d}"]'.format(sif_index)
+        yang_sif : libyang.DContainer = yang_sifs.create_path(yang_sif_path)
+        yang_sif.create_path('config/index', sif_index)
+        if enabled is not None: yang_sif.create_path('config/enabled', enabled)
+
+        if vlan_id is not None:
+            yang_subif_vlan : libyang.DContainer = yang_sif.create_path('openconfig-vlan:vlan')
+            yang_subif_vlan.create_path('match/single-tagged/config/vlan-id', vlan_id)
+
+        yang_ipv4 : libyang.DContainer = yang_sif.create_path('openconfig-if-ip:ipv4')
+        if enabled is not None: yang_ipv4.create_path('config/enabled', enabled)
+
+        if address_ip is not None and address_prefix is not None:
+            yang_ipv4_addrs : libyang.DContainer = yang_ipv4.create_path('addresses')
+            yang_ipv4_addr_path = 'address[ip="{:s}"]'.format(address_ip)
+            yang_ipv4_addr : libyang.DContainer = yang_ipv4_addrs.create_path(yang_ipv4_addr_path)
+            yang_ipv4_addr.create_path('config/ip',            address_ip)
+            yang_ipv4_addr.create_path('config/prefix-length', address_prefix)
+            if mtu is not None: yang_ipv4_addr.create_path('config/mtu', mtu)
 
         str_path = '/interfaces/interface[name={:s}]'.format(if_name)
-        str_data = json.dumps({
-            'name': if_name,
-            'config': {'name': if_name, 'enabled': if_enabled},
-            'subinterfaces': {
-                'subinterface': {
-                    'index': sif_index,
-                    'config': {'index': sif_index, 'enabled': sif_enabled},
-                    'ipv4': {
-                        'config': {'enabled': sif_ipv4_enabled},
-                        'addresses': {
-                            'address': {
-                                'ip': sif_ipv4_address,
-                                'config': {'ip': sif_ipv4_address, 'prefix_length': sif_ipv4_prefix},
-                            }
-                        }
-                    }
-                }
-            }
-        })
+        str_data = yang_if.print_mem('json')
+        json_data = json.loads(str_data)
+        json_data = json_data['openconfig-interfaces:interface'][0]
+        str_data = json.dumps(json_data)
         return str_path, str_data
 
-    def parse(self, json_data : Dict) -> List[Tuple[str, Dict[str, Any]]]:
-        #LOGGER.info('json_data = {:s}'.format(json.dumps(json_data)))
-        json_interface_list : List[Dict] = json_data.get('interface', [])
-
-        response = []
-        for json_interface in json_interface_list:
-            #LOGGER.info('json_interface = {:s}'.format(json.dumps(json_interface)))
-
-            interface = {}
-
-            interface_name = json_interface.get('name')
-            if interface_name is None:
-                LOGGER.info('DISCARDED json_interface = {:s}'.format(json.dumps(json_interface)))
-                continue
-            interface['name'] = interface_name
-
-            CONFIG_FIELDS = ('config', 'openconfig-interface:config', 'oci:config')
-            json_config : Dict = dict_get_first(json_interface, CONFIG_FIELDS, default={})
-
-            STATE_FIELDS = ('state', 'openconfig-interface:state', 'oci:state')
-            json_state : Dict = dict_get_first(json_interface, STATE_FIELDS, default={})
-
-            interface_type = json_config.get('type')
-            if interface_type is None: interface_type = json_state.get('type')
-            if interface_type is None:
-                LOGGER.info('DISCARDED json_interface = {:s}'.format(json.dumps(json_interface)))
-                continue
-            interface_type = interface_type.replace('ianaift:', '')
-            interface_type = interface_type.replace('iana-if-type:', '')
-            interface['type'] = interface_type
-
-            interface_mtu = json_config.get('mtu')
-            if interface_mtu is None: interface_mtu = json_state.get('mtu')
-            if interface_mtu is not None: interface['mtu'] = int(interface_mtu)
-
-            interface_enabled = json_config.get('enabled')
-            if interface_enabled is None: interface_enabled = json_state.get('enabled')
-            interface['enabled'] = False if interface_enabled is None else bool(interface_enabled)
-
-            interface_management = json_config.get('management')
-            if interface_management is None: interface_management = json_state.get('management')
-            interface['management'] = False if interface_management is None else bool(interface_management)
-
-            interface_descr = json_interface.get('config', {}).get('description')
-            if interface_descr is not None: interface['description'] = interface_descr
-
-            json_subinterfaces = json_interface.get('subinterfaces', {})
-            json_subinterface_list : List[Dict] = json_subinterfaces.get('subinterface', [])
-
-            for json_subinterface in json_subinterface_list:
-                #LOGGER.info('json_subinterface = {:s}'.format(json.dumps(json_subinterface)))
-
-                subinterface = {}
-
-                subinterface_index = json_subinterface.get('state', {}).get('index')
-                if subinterface_index is None: continue
-                subinterface['index'] = int(subinterface_index)
-
-                subinterface_name = json_subinterface.get('state', {}).get('name')
-                if subinterface_name is None: continue
-                subinterface['name'] = subinterface_name
-
-                subinterface_enabled = json_subinterface.get('state', {}).get('enabled', False)
-                subinterface['enabled'] = bool(subinterface_enabled)
-
-                VLAN_FIELDS = ('vlan', 'openconfig-vlan:vlan', 'ocv:vlan')
-                json_vlan = dict_get_first(json_subinterface, VLAN_FIELDS, default={})
-
-                MATCH_FIELDS = ('match', 'openconfig-vlan:match', 'ocv:match')
-                json_vlan = dict_get_first(json_vlan, MATCH_FIELDS, default={})
-
-                SIN_TAG_FIELDS = ('single-tagged', 'openconfig-vlan:single-tagged', 'ocv:single-tagged')
-                json_vlan = dict_get_first(json_vlan, SIN_TAG_FIELDS, default={})
-
-                CONFIG_FIELDS = ('config', 'openconfig-vlan:config', 'ocv:config')
-                json_vlan = dict_get_first(json_vlan, CONFIG_FIELDS, default={})
-
-                VLAN_ID_FIELDS = ('vlan-id', 'openconfig-vlan:vlan-id', 'ocv:vlan-id')
-                subinterface_vlan_id = dict_get_first(json_vlan, VLAN_ID_FIELDS)
-                if subinterface_vlan_id is not None: subinterface['vlan_id'] = subinterface_vlan_id
-
-
-                # TODO: implement support for multiple IP addresses per subinterface
-
-                IPV4_FIELDS = ('ipv4', 'openconfig-if-ip:ipv4', 'ociip:ipv4')
-                json_ipv4 = dict_get_first(json_subinterface, IPV4_FIELDS, default={})
-                
-                IPV4_ADDRESSES_FIELDS = ('addresses', 'openconfig-if-ip:addresses', 'ociip:addresses')
-                json_ipv4_addresses = dict_get_first(json_ipv4, IPV4_ADDRESSES_FIELDS, default={})
-
-                IPV4_ADDRESS_FIELDS = ('address', 'openconfig-if-ip:address', 'ociip:address')
-                json_ipv4_address_list : List[Dict] = dict_get_first(json_ipv4_addresses, IPV4_ADDRESS_FIELDS, default=[])
-
-                #ipv4_addresses = []
-                for json_ipv4_address in json_ipv4_address_list:
-                    #LOGGER.info('json_ipv4_address = {:s}'.format(json.dumps(json_ipv4_address)))
-
-                    STATE_FIELDS = ('state', 'openconfig-if-ip:state', 'ociip:state')
-                    json_ipv4_address_state = dict_get_first(json_ipv4_address, STATE_FIELDS, default={})
-
-                    #ipv4_address = {}
-
-                    #ORIGIN_FIELDS = ('origin', 'openconfig-if-ip:origin', 'ociip:origin')
-                    #ipv4_address_origin = dict_get_first(json_ipv4_address_state, ORIGIN_FIELDS, default={})
-                    #if ipv4_address_origin is not None: ipv4_address['origin'] = ipv4_address_origin
-
-                    IP_FIELDS = ('ip', 'openconfig-if-ip:ip', 'ociip:ip')
-                    ipv4_address_ip = dict_get_first(json_ipv4_address_state, IP_FIELDS)
-                    #if ipv4_address_ip is not None: ipv4_address['address_ip'] = ipv4_address_ip
-                    if ipv4_address_ip is not None: subinterface['address_ip'] = ipv4_address_ip
-
-                    PREFIX_FIELDS = ('prefix-length', 'openconfig-if-ip:prefix-length', 'ociip:prefix-length')
-                    ipv4_address_prefix = dict_get_first(json_ipv4_address_state, PREFIX_FIELDS)
-                    #if ipv4_address_prefix is not None: ipv4_address['address_prefix'] = int(ipv4_address_prefix)
-                    if ipv4_address_prefix is not None: subinterface['address_prefix'] = int(ipv4_address_prefix)
-
-                    #if len(ipv4_address) == 0: continue
-                    #ipv4_addresses.append(ipv4_address)
-
-                #subinterface['ipv4_addresses'] = ipv4_addresses
-
-                if len(subinterface) == 0: continue
-                resource_key = '/interface[{:s}]/subinterface[{:s}]'.format(interface['name'], str(subinterface['index']))
-                response.append((resource_key, subinterface))
-
-            if len(interface) == 0: continue
-            response.append(('/interface[{:s}]'.format(interface['name']), interface))
-
-        return response
-
-    def parse_counters(self, json_data : Dict) -> List[Tuple[str, Dict[str, Any]]]:
-        LOGGER.info('[parse_counters] json_data = {:s}'.format(json.dumps(json_data)))
-        json_interface_list : List[Dict] = json_data.get('interface', [])
-
-        response = []
-        for json_interface in json_interface_list:
-            LOGGER.info('[parse_counters] json_interface = {:s}'.format(json.dumps(json_interface)))
-
-            interface = {}
-
-            NAME_FIELDS = ('name', 'openconfig-interface:name', 'oci:name')
-            interface_name = dict_get_first(json_interface, NAME_FIELDS)
-            if interface_name is None: continue
-            interface['name'] = interface_name
-
-            STATE_FIELDS = ('state', 'openconfig-interface:state', 'oci:state')
-            json_state = dict_get_first(json_interface, STATE_FIELDS, default={})
-
-            COUNTERS_FIELDS = ('counters', 'openconfig-interface:counters', 'oci:counters')
-            json_counters = dict_get_first(json_state, COUNTERS_FIELDS, default={})
-
-            IN_PKTS_FIELDS = ('in-pkts', 'openconfig-interface:in-pkts', 'oci:in-pkts')
-            interface_in_pkts = dict_get_first(json_counters, IN_PKTS_FIELDS)
-            if interface_in_pkts is not None: interface['in-pkts'] = int(interface_in_pkts)
-
-            IN_OCTETS_FIELDS = ('in-octets', 'openconfig-interface:in-octets', 'oci:in-octets')
-            interface_in_octets = dict_get_first(json_counters, IN_OCTETS_FIELDS)
-            if interface_in_octets is not None: interface['in-octets'] = int(interface_in_octets)
-
-            IN_ERRORS_FIELDS = ('in-errors', 'openconfig-interface:in-errors', 'oci:in-errors')
-            interface_in_errors = dict_get_first(json_counters, IN_ERRORS_FIELDS)
-            if interface_in_errors is not None: interface['in-errors'] = int(interface_in_errors)
-
-            OUT_OCTETS_FIELDS = ('out-octets', 'openconfig-interface:out-octets', 'oci:out-octets')
-            interface_out_octets = dict_get_first(json_counters, OUT_OCTETS_FIELDS)
-            if interface_out_octets is not None: interface['out-octets'] = int(interface_out_octets)
-
-            OUT_PKTS_FIELDS = ('out-pkts', 'openconfig-interface:out-pkts', 'oci:out-pkts')
-            interface_out_pkts = dict_get_first(json_counters, OUT_PKTS_FIELDS)
-            if interface_out_pkts is not None: interface['out-pkts'] = int(interface_out_pkts)
-
-            OUT_ERRORS_FIELDS = ('out-errors', 'openconfig-interface:out-errors', 'oci:out-errors')
-            interface_out_errors = dict_get_first(json_counters, OUT_ERRORS_FIELDS)
-            if interface_out_errors is not None: interface['out-errors'] = int(interface_out_errors)
-
-            OUT_DISCARDS_FIELDS = ('out-discards', 'openconfig-interface:out-discards', 'oci:out-discards')
-            interface_out_discards = dict_get_first(json_counters, OUT_DISCARDS_FIELDS)
-            if interface_out_discards is not None: interface['out-discards'] = int(interface_out_discards)
-
-            #LOGGER.info('[parse_counters] interface = {:s}'.format(str(interface)))
-
-            if len(interface) == 0: continue
-            response.append(('/interface[{:s}]'.format(interface['name']), interface))
-
-        return response
+    def parse(
+        self, json_data : Dict, yang_handler : YangHandler
+    ) -> List[Tuple[str, Dict[str, Any]]]:
+        LOGGER.debug('json_data = {:s}'.format(json.dumps(json_data)))
+
+        yang_interfaces_path = self.get_path()
+        json_data_valid = yang_handler.parse_to_dict(yang_interfaces_path, json_data, fmt='json')
+
+        entries = []
+        for interface in json_data_valid['interfaces']['interface']:
+            LOGGER.debug('interface={:s}'.format(str(interface)))
+
+            interface_name = interface['name']
+            interface_config = interface.get('config', {})
+
+            #yang_interfaces : libyang.DContainer = yang_handler.get_data_path(yang_interfaces_path)
+            #yang_interface_path = 'interface[name="{:s}"]'.format(interface_name)
+            #yang_interface : libyang.DContainer = yang_interfaces.create_path(yang_interface_path)
+            #yang_interface.merge_data_dict(interface, strict=True, validate=False)
+
+            interface_state = interface.get('state', {})
+            interface_type = interface_state.get('type')
+            if interface_type is None: continue
+            interface_type = interface_type.split(':')[-1]
+            if interface_type not in {'ethernetCsmacd'}: continue
+
+            _interface = {
+                'name'         : interface_name,
+                'type'         : interface_type,
+                'mtu'          : interface_state['mtu'],
+                'admin-status' : interface_state['admin-status'],
+                'oper-status'  : interface_state['oper-status'],
+                'management'   : interface_state['management'],
+            }
+            if not interface_state['management'] and 'ifindex' in interface_state:
+                _interface['ifindex'] = interface_state['ifindex']
+            if 'description' in interface_config:
+                _interface['description'] = interface_config['description']
+            if 'enabled' in interface_config:
+                _interface['enabled'] = interface_config['enabled']
+            if 'hardware-port' in interface_state:
+                _interface['hardware-port'] = interface_state['hardware-port']
+            if 'transceiver' in interface_state:
+                _interface['transceiver'] = interface_state['transceiver']
+
+            entry_interface_key = '/interface[{:s}]'.format(interface_name)
+            entries.append((entry_interface_key, _interface))
+
+            if interface_type == 'ethernetCsmacd':
+                ethernet_state = interface['ethernet']['state']
+
+                _ethernet = {
+                    'mac-address'           : ethernet_state['mac-address'],
+                    'hw-mac-address'        : ethernet_state['hw-mac-address'],
+                    'port-speed'            : ethernet_state['port-speed'].split(':')[-1],
+                    'negotiated-port-speed' : ethernet_state['negotiated-port-speed'].split(':')[-1],
+                }
+                entry_ethernet_key = '{:s}/ethernet'.format(entry_interface_key)
+                entries.append((entry_ethernet_key, _ethernet))
+
+            subinterfaces = interface.get('subinterfaces', {}).get('subinterface', [])
+            for subinterface in subinterfaces:
+                LOGGER.debug('subinterface={:s}'.format(str(subinterface)))
+
+                subinterface_index = subinterface['index']
+                subinterface_state = subinterface.get('state', {})
+
+                _subinterface = {'index': subinterface_index}
+                if 'name' in subinterface_state:
+                    _subinterface['name'] = subinterface_state['name']
+                if 'enabled' in subinterface_state:
+                    _subinterface['enabled'] = subinterface_state['enabled']
+
+                if 'vlan' in subinterface:
+                    vlan = subinterface['vlan']
+                    vlan_match = vlan['match']
+
+                    single_tagged = vlan_match.pop('single-tagged', None)
+                    if single_tagged is not None:
+                        single_tagged_config = single_tagged['config']
+                        vlan_id = single_tagged_config['vlan-id']
+                        _subinterface['vlan_id'] = vlan_id
+
+                    if len(vlan_match) > 0:
+                        raise Exception('Unsupported VLAN schema: {:s}'.format(str(vlan)))
+
+                ipv4_addresses = subinterface.get('ipv4', {}).get('addresses', {}).get('address', [])
+                if len(ipv4_addresses) > 1:
+                    raise Exception('Multiple IPv4 Addresses not supported: {:s}'.format(str(ipv4_addresses)))
+                for ipv4_address in ipv4_addresses:
+                    LOGGER.debug('ipv4_address={:s}'.format(str(ipv4_address)))
+                    _subinterface['address_ip'] = ipv4_address['ip']
+                    ipv4_address_state = ipv4_address.get('state', {})
+                    #if 'origin' in ipv4_address_state:
+                    #    _subinterface['origin'] = ipv4_address_state['origin']
+                    if 'prefix-length' in ipv4_address_state:
+                        _subinterface['address_prefix'] = ipv4_address_state['prefix-length']
+
+                ipv6_addresses = subinterface.get('ipv6', {}).get('addresses', {}).get('address', [])
+                if len(ipv6_addresses) > 1:
+                    raise Exception('Multiple IPv6 Addresses not supported: {:s}'.format(str(ipv6_addresses)))
+                for ipv6_address in ipv6_addresses:
+                    LOGGER.debug('ipv6_address={:s}'.format(str(ipv6_address)))
+                    _subinterface['address_ipv6'] = ipv6_address['ip']
+                    ipv6_address_state = ipv6_address.get('state', {})
+                    #if 'origin' in ipv6_address_state:
+                    #    _subinterface['origin_ipv6'] = ipv6_address_state['origin']
+                    if 'prefix-length' in ipv6_address_state:
+                        _subinterface['address_prefix_ipv6'] = ipv6_address_state['prefix-length']
+
+                entry_subinterface_key = '{:s}/subinterface[{:d}]'.format(entry_interface_key, subinterface_index)
+                entries.append((entry_subinterface_key, _subinterface))
+
+        return entries
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/InterfaceCounter.py b/src/device/service/drivers/gnmi_openconfig/handlers/InterfaceCounter.py
index 502868c2204553d30e7cdd529184cf994d03fd21..ae6d86c43addf4f3083b9900796365b3e0601620 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/InterfaceCounter.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/InterfaceCounter.py
@@ -12,69 +12,53 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging
+import json, libyang, logging
 from typing import Any, Dict, List, Tuple
 from ._Handler import _Handler
-from .Tools import dict_get_first
+from .YangHandler import YangHandler
 
 LOGGER = logging.getLogger(__name__)
 
+#pylint: disable=abstract-method
 class InterfaceCounterHandler(_Handler):
     def get_resource_key(self) -> str: return '/interface/counters'
-    def get_path(self) -> str: return '/interfaces/interface/state/counters'
-
-    def parse(self, json_data : Dict) -> List[Tuple[str, Dict[str, Any]]]:
-        LOGGER.info('[parse] json_data = {:s}'.format(json.dumps(json_data)))
-        json_interface_list : List[Dict] = json_data.get('interface', [])
-
-        response = []
-        for json_interface in json_interface_list:
-            LOGGER.info('[parse] json_interface = {:s}'.format(json.dumps(json_interface)))
-
-            interface = {}
-
-            NAME_FIELDS = ('name', 'openconfig-interface:name', 'oci:name')
-            interface_name = dict_get_first(json_interface, NAME_FIELDS)
-            if interface_name is None: continue
-            interface['name'] = interface_name
-
-            STATE_FIELDS = ('state', 'openconfig-interface:state', 'oci:state')
-            json_state = dict_get_first(json_interface, STATE_FIELDS, default={})
-
-            COUNTERS_FIELDS = ('counters', 'openconfig-interface:counters', 'oci:counters')
-            json_counters = dict_get_first(json_state, COUNTERS_FIELDS, default={})
-
-            IN_PKTS_FIELDS = ('in-pkts', 'openconfig-interface:in-pkts', 'oci:in-pkts')
-            interface_in_pkts = dict_get_first(json_counters, IN_PKTS_FIELDS)
-            if interface_in_pkts is not None: interface['in-pkts'] = int(interface_in_pkts)
-
-            IN_OCTETS_FIELDS = ('in-octets', 'openconfig-interface:in-octets', 'oci:in-octets')
-            interface_in_octets = dict_get_first(json_counters, IN_OCTETS_FIELDS)
-            if interface_in_octets is not None: interface['in-octets'] = int(interface_in_octets)
-
-            IN_ERRORS_FIELDS = ('in-errors', 'openconfig-interface:in-errors', 'oci:in-errors')
-            interface_in_errors = dict_get_first(json_counters, IN_ERRORS_FIELDS)
-            if interface_in_errors is not None: interface['in-errors'] = int(interface_in_errors)
-
-            OUT_OCTETS_FIELDS = ('out-octets', 'openconfig-interface:out-octets', 'oci:out-octets')
-            interface_out_octets = dict_get_first(json_counters, OUT_OCTETS_FIELDS)
-            if interface_out_octets is not None: interface['out-octets'] = int(interface_out_octets)
-
-            OUT_PKTS_FIELDS = ('out-pkts', 'openconfig-interface:out-pkts', 'oci:out-pkts')
-            interface_out_pkts = dict_get_first(json_counters, OUT_PKTS_FIELDS)
-            if interface_out_pkts is not None: interface['out-pkts'] = int(interface_out_pkts)
-
-            OUT_ERRORS_FIELDS = ('out-errors', 'openconfig-interface:out-errors', 'oci:out-errors')
-            interface_out_errors = dict_get_first(json_counters, OUT_ERRORS_FIELDS)
-            if interface_out_errors is not None: interface['out-errors'] = int(interface_out_errors)
-
-            OUT_DISCARDS_FIELDS = ('out-discards', 'openconfig-interface:out-discards', 'oci:out-discards')
-            interface_out_discards = dict_get_first(json_counters, OUT_DISCARDS_FIELDS)
-            if interface_out_discards is not None: interface['out-discards'] = int(interface_out_discards)
-
-            #LOGGER.info('[parse] interface = {:s}'.format(str(interface)))
-
-            if len(interface) == 0: continue
-            response.append(('/interface[{:s}]'.format(interface['name']), interface))
-
-        return response
+    def get_path(self) -> str: return '/openconfig-interfaces:interfaces/interface/state/counters'
+
+    def parse(
+        self, json_data : Dict, yang_handler : YangHandler
+    ) -> List[Tuple[str, Dict[str, Any]]]:
+        LOGGER.debug('json_data = {:s}'.format(json.dumps(json_data)))
+
+        yang_interfaces_path = self.get_path()
+        json_data_valid = yang_handler.parse_to_dict(yang_interfaces_path, json_data, fmt='json')
+
+        entries = []
+        for interface in json_data_valid['interfaces']['interface']:
+            LOGGER.debug('interface={:s}'.format(str(interface)))
+
+            interface_name = interface['name']
+            interface_counters = interface.get('state', {}).get('counters', {})
+            _interface = {
+                'name'              : interface_name,
+                'in-broadcast-pkts' : interface_counters['in_broadcast_pkts' ],  # NOTE(review): verify parse_to_dict yields snake_case keys; the YANG leaf is 'in-broadcast-pkts'
+                'in-discards'       : interface_counters['in_discards'       ],
+                'in-errors'         : interface_counters['in_errors'         ],
+                'in-fcs-errors'     : interface_counters['in_fcs_errors'     ],
+                'in-multicast-pkts' : interface_counters['in_multicast_pkts' ],
+                'in-octets'         : interface_counters['in_octets'         ],
+                'in-pkts'           : interface_counters['in_pkts'           ],
+                'in-unicast-pkts'   : interface_counters['in_unicast_pkts'   ],
+                'out-broadcast-pkts': interface_counters['out_broadcast_pkts'],
+                'out-discards'      : interface_counters['out_discards'      ],
+                'out-errors'        : interface_counters['out_errors'        ],
+                'out-multicast-pkts': interface_counters['out_multicast_pkts'],
+                'out-octets'        : interface_counters['out_octets'        ],
+                'out-pkts'          : interface_counters['out_pkts'          ],
+                'out-unicast-pkts'  : interface_counters['out_unicast_pkts'  ],
+            }
+            LOGGER.debug('interface = {:s}'.format(str(interface)))  # NOTE(review): did you mean _interface? raw 'interface' was already logged above
+
+            entry_interface_key = '/interface[{:s}]'.format(interface_name)
+            entries.append((entry_interface_key, _interface))
+
+        return entries
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstance.py b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstance.py
index d522eec964b2c6beffe953075411dbba2594eaa4..f1d1c56b4023a1250263aaf0995975d815eb5fec 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstance.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstance.py
@@ -12,18 +12,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging
+import json, libyang, logging
 from typing import Any, Dict, List, Tuple
 from ._Handler import _Handler
+from .Tools import get_str
+from .YangHandler import YangHandler
 
 LOGGER = logging.getLogger(__name__)
 
+MAP_NETWORK_INSTANCE_TYPE = {
+    # special routing instance; acts as default/global routing instance for a network device
+    'DEFAULT': 'openconfig-network-instance-types:DEFAULT_INSTANCE',
+
+    # private L3-only routing instance; formed of one or more RIBs
+    'L3VRF': 'openconfig-network-instance-types:L3VRF',
+
+    # private L2-only switch instance; formed of one or more L2 forwarding tables
+    'L2VSI': 'openconfig-network-instance-types:L2VSI',
+
+    # private L2-only forwarding instance; point to point connection between two endpoints
+    'L2P2P': 'openconfig-network-instance-types:L2P2P',
+
+    # private Layer 2 and Layer 3 forwarding instance
+    'L2L3': 'openconfig-network-instance-types:L2L3',
+}
+
 class NetworkInstanceHandler(_Handler):
     def get_resource_key(self) -> str: return '/network_instance'
-    def get_path(self) -> str: return '/network-instances/network-instance'
+    def get_path(self) -> str: return '/openconfig-network-instance:network-instances'
 
-    def compose(self, resource_key : str, resource_value : Dict, delete : bool = False) -> Tuple[str, str]:
-        ni_name   = str(resource_value['name'])   # test-svc
+    def compose(
+        self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False
+    ) -> Tuple[str, str]:
+        ni_name = get_str(resource_value, 'name') # test-svc
 
         if delete:
             PATH_TMPL = '/network-instances/network-instance[name={:s}]'
@@ -31,32 +52,124 @@ class NetworkInstanceHandler(_Handler):
             str_data = json.dumps({})
             return str_path, str_data
 
-        ni_type   = str(resource_value['type'])   # L3VRF / L2VSI / ...
+        ni_type = get_str(resource_value, 'type') # L3VRF / L2VSI / ...
+        ni_type = MAP_NETWORK_INSTANCE_TYPE.get(ni_type, ni_type)
+
+        str_path = '/network-instances/network-instance[name={:s}]'.format(ni_name)
+        #str_data = json.dumps({
+        #    'name': ni_name,
+        #    'config': {'name': ni_name, 'type': ni_type},
+        #})
 
-        # not works: [FailedPrecondition] unsupported identifier 'DIRECTLY_CONNECTED'
-        #protocols = [self._compose_directly_connected()]
+        yang_nis : libyang.DContainer = yang_handler.get_data_path('/openconfig-network-instance:network-instances')
+        yang_ni_path = 'network-instance[name="{:s}"]'.format(ni_name)
+        yang_ni : libyang.DContainer = yang_nis.create_path(yang_ni_path)
+        yang_ni.create_path('config/name', ni_name)
+        yang_ni.create_path('config/type', ni_type)
 
-        MAP_OC_NI_TYPE = {
-            'L3VRF': 'openconfig-network-instance-types:L3VRF',
-        }
-        ni_type = MAP_OC_NI_TYPE.get(ni_type, ni_type)
+        # 'DIRECTLY_CONNECTED' is implicitly added
+        #'protocols': {'protocol': protocols},
 
-        str_path = '/network-instances/network-instance[name={:s}]'.format(ni_name)
-        str_data = json.dumps({
-            'name': ni_name,
-            'config': {'name': ni_name, 'type': ni_type},
-            #'protocols': {'protocol': protocols},
-        })
+        str_data = yang_ni.print_mem('json')
+        json_data = json.loads(str_data)
+        json_data = json_data['openconfig-network-instance:network-instance'][0]
+        str_data = json.dumps(json_data)
         return str_path, str_data
 
-    def _compose_directly_connected(self, name=None, enabled=True) -> Dict:
-        identifier = 'DIRECTLY_CONNECTED'
-        if name is None: name = 'DIRECTLY_CONNECTED'
-        return {
-            'identifier': identifier, 'name': name,
-            'config': {'identifier': identifier, 'name': name, 'enabled': enabled},
-        }
-
-    def parse(self, json_data : Dict) -> List[Tuple[str, Dict[str, Any]]]:
-        response = []
-        return response
+    def parse(
+        self, json_data : Dict, yang_handler : YangHandler
+    ) -> List[Tuple[str, Dict[str, Any]]]:
+        LOGGER.debug('json_data = {:s}'.format(json.dumps(json_data)))
+
+        # Arista Parsing Fixes:
+        # - Default instance comes with mpls/signaling-protocols/rsvp-te/global/hellos/state/hello-interval set to 0
+        #   overwrite with .../hellos/config/hello-interval
+        network_instances = json_data.get('openconfig-network-instance:network-instance', [])
+        for network_instance in network_instances:
+            if network_instance['name'] != 'default': continue
+            mpls_rsvp_te = network_instance.get('mpls', {}).get('signaling-protocols', {}).get('rsvp-te', {})
+            mpls_rsvp_te_hellos = mpls_rsvp_te.get('global', {}).get('hellos', {})
+            hello_interval = mpls_rsvp_te_hellos.get('config', {}).get('hello-interval', 9000)
+            mpls_rsvp_te_hellos.setdefault('state', {})['hello-interval'] = hello_interval
+
+        yang_network_instances_path = self.get_path()
+        json_data_valid = yang_handler.parse_to_dict(yang_network_instances_path, json_data, fmt='json', strict=False)
+
+        entries = []
+        for network_instance in json_data_valid['network-instances']['network-instance']:
+            LOGGER.debug('network_instance={:s}'.format(str(network_instance)))
+            ni_name = network_instance['name']
+
+            ni_config = network_instance['config']
+            ni_type = ni_config['type'].split(':')[-1]
+
+            _net_inst = {'name': ni_name, 'type': ni_type}
+            entry_net_inst_key = '/network_instance[{:s}]'.format(ni_name)
+            entries.append((entry_net_inst_key, _net_inst))
+
+            ni_interfaces = network_instance.get('interfaces', {}).get('interface', [])
+            for ni_interface in ni_interfaces:
+                #ni_if_id     = ni_interface['id']
+                ni_if_config = ni_interface['config']
+                ni_if_name   = ni_if_config['interface']
+                ni_sif_index = ni_if_config['subinterface']
+                ni_if_id     = '{:s}.{:d}'.format(ni_if_name, ni_sif_index)
+
+                _interface = {'name': ni_name, 'id': ni_if_id, 'if_name': ni_if_name, 'sif_index': ni_sif_index}
+                entry_interface_key = '{:s}/interface[{:s}]'.format(entry_net_inst_key, ni_if_id)
+                entries.append((entry_interface_key, _interface))
+
+            ni_protocols = network_instance.get('protocols', {}).get('protocol', [])
+            for ni_protocol in ni_protocols:
+                ni_protocol_id = ni_protocol['identifier'].split(':')[-1]
+                ni_protocol_name = ni_protocol['name']
+
+                _protocol = {'name': ni_name, 'identifier': ni_protocol_id, 'protocol_name': ni_protocol_name}
+                entry_protocol_key = '{:s}/protocols[{:s}]'.format(entry_net_inst_key, ni_protocol_id)
+                entries.append((entry_protocol_key, _protocol))
+
+                if ni_protocol_id == 'STATIC':
+                    static_routes = ni_protocol.get('static-routes', {}).get('static', [])
+                    for static_route in static_routes:
+                        static_route_prefix = static_route['prefix']
+                        for next_hop in static_route.get('next-hops', {}).get('next-hop', []):
+                            static_route_metric = next_hop['config']['metric']
+                            _static_route = {
+                                'prefix'  : static_route_prefix,
+                                'index'   : next_hop['index'],
+                                'next_hop': next_hop['config']['next-hop'],
+                                'metric'  : static_route_metric,
+                            }
+                            _static_route.update(_protocol)
+                            entry_static_route_key = '{:s}/static_route[{:s}:{:d}]'.format(
+                                entry_protocol_key, static_route_prefix, static_route_metric
+                            )
+                            entries.append((entry_static_route_key, _static_route))
+
+            ni_tables = network_instance.get('tables', {}).get('table', [])
+            for ni_table in ni_tables:
+                ni_table_protocol = ni_table['protocol'].split(':')[-1]
+                ni_table_address_family = ni_table['address-family'].split(':')[-1]
+                _table = {'protocol': ni_table_protocol, 'address_family': ni_table_address_family}
+                entry_table_key = '{:s}/table[{:s},{:s}]'.format(
+                    entry_net_inst_key, ni_table_protocol, ni_table_address_family
+                )
+                entries.append((entry_table_key, _table))
+
+            ni_vlans = network_instance.get('vlans', {}).get('vlan', [])
+            for ni_vlan in ni_vlans:
+                ni_vlan_id = ni_vlan['vlan-id']
+
+                #ni_vlan_config = ni_vlan['config']
+                ni_vlan_state = ni_vlan['state']
+                ni_vlan_name = ni_vlan_state['name']
+
+                _members = [
+                    member['state']['interface']
+                    for member in ni_vlan.get('members', {}).get('member', [])
+                ]
+                _vlan = {'vlan_id': ni_vlan_id, 'name': ni_vlan_name, 'members': _members}
+                entry_vlan_key = '{:s}/vlan[{:d}]'.format(entry_net_inst_key, ni_vlan_id)
+                entries.append((entry_vlan_key, _vlan))
+
+        return entries
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceInterface.py b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceInterface.py
index cc22618bc4179ba6eabdd2b4232b8cbfb92f1587..5e5ea9bf0bdc38b09d3959e8b6ecfc3534319817 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceInterface.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceInterface.py
@@ -12,35 +12,62 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging
+import json, libyang, logging
 from typing import Any, Dict, List, Tuple
 from ._Handler import _Handler
+from .Tools import get_int, get_str
+from .YangHandler import YangHandler
 
 LOGGER = logging.getLogger(__name__)
 
+IS_CEOS = True
+
 class NetworkInstanceInterfaceHandler(_Handler):
     def get_resource_key(self) -> str: return '/network_instance/interface'
-    def get_path(self) -> str: return '/network-instances/network-instance/interfaces'
+    def get_path(self) -> str: return '/openconfig-network-instance:network-instances/network-instance/interfaces'
+
+    def compose(
+        self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False
+    ) -> Tuple[str, str]:
+        ni_name   = get_str(resource_value, 'name'           ) # test-svc
+        ni_if_id  = get_str(resource_value, 'id'             ) # ethernet-1/1.0
+        if_name   = get_str(resource_value, 'interface'      ) # ethernet-1/1
+        sif_index = get_int(resource_value, 'subinterface', 0) # 0
 
-    def compose(self, resource_key : str, resource_value : Dict, delete : bool = False) -> Tuple[str, str]:
-        ni_name   = str(resource_value['name'     ])    # test-svc
-        if_name   = str(resource_value['if_name'  ])    # ethernet-1/1
-        sif_index = int(resource_value['sif_index'])    # 0
-        if_id     = '{:s}.{:d}'.format(if_name, sif_index)
+        if IS_CEOS: ni_if_id = if_name
 
         if delete:
             PATH_TMPL = '/network-instances/network-instance[name={:s}]/interfaces/interface[id={:s}]'
-            str_path = PATH_TMPL.format(ni_name, if_id)
+            str_path = PATH_TMPL.format(ni_name, ni_if_id)
             str_data = json.dumps({})
             return str_path, str_data
 
-        str_path = '/network-instances/network-instance[name={:s}]/interfaces/interface[id={:s}]'.format(ni_name, if_id)
-        str_data = json.dumps({
-            'id': if_id,
-            'config': {'id': if_id, 'interface': if_name, 'subinterface': sif_index},
-        })
+        str_path = '/network-instances/network-instance[name={:s}]/interfaces/interface[id={:s}]'.format(
+            ni_name, ni_if_id
+        )
+        #str_data = json.dumps({
+        #    'id': if_id,
+        #    'config': {'id': if_id, 'interface': if_name, 'subinterface': sif_index},
+        #})
+
+        yang_nis : libyang.DContainer = yang_handler.get_data_path('/openconfig-network-instance:network-instances')
+        yang_ni : libyang.DContainer = yang_nis.create_path('network-instance[name="{:s}"]'.format(ni_name))
+        yang_ni_ifs : libyang.DContainer = yang_ni.create_path('interfaces')
+        yang_ni_if_path = 'interface[id="{:s}"]'.format(ni_if_id)
+        yang_ni_if : libyang.DContainer = yang_ni_ifs.create_path(yang_ni_if_path)
+        yang_ni_if.create_path('config/id',           ni_if_id)
+        yang_ni_if.create_path('config/interface',    if_name)
+        yang_ni_if.create_path('config/subinterface', sif_index)
+
+        str_data = yang_ni_if.print_mem('json')
+        json_data = json.loads(str_data)
+        json_data = json_data['openconfig-network-instance:interface'][0]
+        str_data = json.dumps(json_data)
         return str_path, str_data
 
-    def parse(self, json_data : Dict) -> List[Tuple[str, Dict[str, Any]]]:
+    def parse(
+        self, json_data : Dict, yang_handler : YangHandler
+    ) -> List[Tuple[str, Dict[str, Any]]]:
+        LOGGER.debug('[parse] json_data = {:s}'.format(str(json_data)))
         response = []
         return response
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceProtocol.py b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceProtocol.py
new file mode 100644
index 0000000000000000000000000000000000000000..f155fa1ca5c0a990abf18ab1c78059673d94e04e
--- /dev/null
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceProtocol.py
@@ -0,0 +1,79 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, libyang, logging
+from typing import Any, Dict, List, Tuple
+from ._Handler import _Handler
+from .Tools import get_str
+from .YangHandler import YangHandler
+
+LOGGER = logging.getLogger(__name__)
+
+class NetworkInstanceProtocolHandler(_Handler):
+    def get_resource_key(self) -> str: return '/network_instance/protocols'
+    def get_path(self) -> str:
+        return '/openconfig-network-instance:network-instances/network-instance/protocols/protocol'
+
+    def compose(
+        self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False
+    ) -> Tuple[str, str]:
+        ni_name    = get_str(resource_value, 'name'  )          # test-svc
+        identifier = get_str(resource_value, 'identifier')      # 'STATIC'
+        proto_name = get_str(resource_value, 'protocol_name')   # 'STATIC'
+
+        if ':' not in identifier:
+            identifier = 'openconfig-policy-types:{:s}'.format(identifier)
+        PATH_TMPL = '/network-instances/network-instance[name={:s}]/protocols/protocol[identifier={:s}][name={:s}]'
+        str_path = PATH_TMPL.format(ni_name, identifier, proto_name)
+
+        if delete:
+            str_data = json.dumps({})
+            return str_path, str_data
+
+        #str_data = json.dumps({
+        #    'identifier': identifier, 'name': name,
+        #    'config': {'identifier': identifier, 'name': name, 'enabled': True},
+        #    'static_routes': {'static': [{
+        #        'prefix': prefix,
+        #        'config': {'prefix': prefix},
+        #        'next_hops': {
+        #            'next-hop': [{
+        #                'index': next_hop_index,
+        #                'config': {'index': next_hop_index, 'next_hop': next_hop}
+        #            }]
+        #        }
+        #    }]}
+        #})
+
+        yang_nis : libyang.DContainer = yang_handler.get_data_path('/openconfig-network-instance:network-instances')
+        yang_ni : libyang.DContainer = yang_nis.create_path('network-instance[name="{:s}"]'.format(ni_name))
+        yang_ni_prs : libyang.DContainer = yang_ni.create_path('protocols')
+        yang_ni_pr_path = 'protocol[identifier="{:s}"][name="{:s}"]'.format(identifier, proto_name)
+        yang_ni_pr : libyang.DContainer = yang_ni_prs.create_path(yang_ni_pr_path)
+        yang_ni_pr.create_path('config/identifier', identifier)
+        yang_ni_pr.create_path('config/name',       proto_name)
+        yang_ni_pr.create_path('config/enabled',    True      )
+
+        str_data = yang_ni_pr.print_mem('json')
+        json_data = json.loads(str_data)
+        json_data = json_data['openconfig-network-instance:protocol'][0]
+        str_data = json.dumps(json_data)
+        return str_path, str_data
+
+    def parse(
+        self, json_data : Dict, yang_handler : YangHandler
+    ) -> List[Tuple[str, Dict[str, Any]]]:
+        LOGGER.debug('[parse] json_data = {:s}'.format(str(json_data)))
+        response = []
+        return response
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceStaticRoute.py b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceStaticRoute.py
index 6294f9c9bf46b3b60da48f19c08ad57f096fa39b..9f80b647b8e1188c80609dbdb47bbe9ea0d68b5f 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceStaticRoute.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceStaticRoute.py
@@ -12,50 +12,90 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json, logging
+import json, libyang, logging
 from typing import Any, Dict, List, Tuple
 from ._Handler import _Handler
+from .Tools import get_int, get_str
+from .YangHandler import YangHandler
 
 LOGGER = logging.getLogger(__name__)
 
 class NetworkInstanceStaticRouteHandler(_Handler):
-    def get_resource_key(self) -> str: return '/network_instance/static_route'
-    def get_path(self) -> str: return '/network-instances/network-instance/static_route'
+    def get_resource_key(self) -> str: return '/network_instance/protocols/static_route'
+    def get_path(self) -> str:
+        return '/openconfig-network-instance:network-instances/network-instance/protocols/protocol/static-routes'
 
-    def compose(self, resource_key : str, resource_value : Dict, delete : bool = False) -> Tuple[str, str]:
-        ni_name        = str(resource_value['name'                 ]) # test-svc
-        prefix         = str(resource_value['prefix'               ]) # '172.0.1.0/24'
+    def compose(
+        self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False
+    ) -> Tuple[str, str]:
+        ni_name    = get_str(resource_value, 'name'  )          # test-svc
+        identifier = get_str(resource_value, 'identifier')      # 'STATIC'
+        proto_name = get_str(resource_value, 'protocol_name')   # 'STATIC'
+        prefix     = get_str(resource_value, 'prefix')          # '172.0.1.0/24'
+
+        if ':' not in identifier:
+            identifier = 'openconfig-policy-types:{:s}'.format(identifier)
 
-        identifier = 'STATIC'
-        name = 'static'
         if delete:
             PATH_TMPL  = '/network-instances/network-instance[name={:s}]/protocols'
             PATH_TMPL += '/protocol[identifier={:s}][name={:s}]/static-routes/static[prefix={:s}]'
-            str_path = PATH_TMPL.format(ni_name, identifier, name, prefix)
+            str_path = PATH_TMPL.format(ni_name, identifier, proto_name, prefix)
             str_data = json.dumps({})
             return str_path, str_data
 
-        next_hop       = str(resource_value['next_hop'             ]) # '172.0.0.1'
-        next_hop_index = int(resource_value.get('next_hop_index', 0)) # 0
+        next_hop = get_str(resource_value, 'next_hop')  # '172.0.0.1'
+        metric   = get_int(resource_value, 'metric'  )  # 20
+        index    = get_str(resource_value, 'index'   )  # AUTO_1_172-0-0-1
+        if index is None:
+            index = 'AUTO_{:d}_{:s}'.format(metric, next_hop)
 
         PATH_TMPL = '/network-instances/network-instance[name={:s}]/protocols/protocol[identifier={:s}][name={:s}]'
-        str_path = PATH_TMPL.format(ni_name, identifier, name)
-        str_data = json.dumps({
-            'identifier': identifier, 'name': name,
-            'config': {'identifier': identifier, 'name': name, 'enabled': True},
-            'static_routes': {'static': [{
-                'prefix': prefix,
-                'config': {'prefix': prefix},
-                'next_hops': {
-                    'next-hop': [{
-                        'index': next_hop_index,
-                        'config': {'index': next_hop_index, 'next_hop': next_hop}
-                    }]
-                }
-            }]}
-        })
+        str_path = PATH_TMPL.format(ni_name, identifier, proto_name)
+        #str_data = json.dumps({
+        #    'identifier': identifier, 'name': name,
+        #    'config': {'identifier': identifier, 'name': name, 'enabled': True},
+        #    'static_routes': {'static': [{
+        #        'prefix': prefix,
+        #        'config': {'prefix': prefix},
+        #        'next_hops': {
+        #            'next-hop': [{
+        #                'index': next_hop_index,
+        #                'config': {'index': next_hop_index, 'next_hop': next_hop}
+        #            }]
+        #        }
+        #    }]}
+        #})
+
+        yang_nis : libyang.DContainer = yang_handler.get_data_path('/openconfig-network-instance:network-instances')
+        yang_ni : libyang.DContainer = yang_nis.create_path('network-instance[name="{:s}"]'.format(ni_name))
+        yang_ni_prs : libyang.DContainer = yang_ni.create_path('protocols')
+        yang_ni_pr_path = 'protocol[identifier="{:s}"][name="{:s}"]'.format(identifier, proto_name)
+        yang_ni_pr : libyang.DContainer = yang_ni_prs.create_path(yang_ni_pr_path)
+        yang_ni_pr.create_path('config/identifier', identifier)
+        yang_ni_pr.create_path('config/name',       proto_name)
+        yang_ni_pr.create_path('config/enabled',    True      )
+
+        yang_ni_pr_srs : libyang.DContainer = yang_ni_pr.create_path('static-routes')
+        yang_ni_pr_sr_path = 'static[prefix="{:s}"]'.format(prefix)
+        yang_ni_pr_sr : libyang.DContainer = yang_ni_pr_srs.create_path(yang_ni_pr_sr_path)
+        yang_ni_pr_sr.create_path('config/prefix', prefix)
+
+        yang_ni_pr_sr_nhs : libyang.DContainer = yang_ni_pr_sr.create_path('next-hops')
+        yang_ni_pr_sr_nh_path = 'next-hop[index="{:s}"]'.format(index)
+        yang_ni_pr_sr_nh : libyang.DContainer = yang_ni_pr_sr_nhs.create_path(yang_ni_pr_sr_nh_path)
+        yang_ni_pr_sr_nh.create_path('config/index',    index)
+        yang_ni_pr_sr_nh.create_path('config/next-hop', next_hop)
+        yang_ni_pr_sr_nh.create_path('config/metric',   metric)
+
+        str_data = yang_ni_pr.print_mem('json')
+        json_data = json.loads(str_data)
+        json_data = json_data['openconfig-network-instance:protocol'][0]
+        str_data = json.dumps(json_data)
         return str_path, str_data
 
-    def parse(self, json_data : Dict) -> List[Tuple[str, Dict[str, Any]]]:
+    def parse(
+        self, json_data : Dict, yang_handler : YangHandler
+    ) -> List[Tuple[str, Dict[str, Any]]]:
+        LOGGER.debug('[parse] json_data = {:s}'.format(str(json_data)))
         response = []
         return response
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/Tools.py b/src/device/service/drivers/gnmi_openconfig/handlers/Tools.py
index 358c7de9fa3b9c46c4a7c70142deb1c2ab396ad0..f258de926d44c48a655eab877eeb0350df511b69 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/Tools.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/Tools.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import re
-from typing import Any, Dict, Iterable
+from typing import Any, Callable, Dict, Iterable, Optional
 
 RE_REMOVE_FILTERS = re.compile(r'\[[^\]]+\]')
 RE_REMOVE_NAMESPACES = re.compile(r'\/[a-zA-Z0-9\_\-]+:')
@@ -23,8 +23,39 @@ def get_schema(resource_key : str):
     resource_key = RE_REMOVE_NAMESPACES.sub('/', resource_key)
     return resource_key
 
-def dict_get_first(d : Dict, field_names : Iterable[str], default=None) -> Any:
-    for field_name in field_names:
-        if field_name not in d: continue
-        return d[field_name]
+def container_get_first(
+    container : Dict[str, Any], key_name : str, namespace : Optional[str]=None, namespaces : Iterable[str]=tuple(),
+    default : Optional[Any] = None
+) -> Any:
+    value = container.get(key_name)
+    if value is not None: return value
+
+    if namespace is not None:
+        if len(namespaces) > 0:
+            raise Exception('At maximum, one of namespace or namespaces can be specified')
+        namespaces = (namespace,)
+
+    for namespace in namespaces:
+        namespace_key_name = '{:s}:{:s}'.format(namespace, key_name)
+        if namespace_key_name in container: return container[namespace_key_name]
+
     return default
+
+def get_value(
+    resource_value : Dict, field_name : str, cast_func : Callable = lambda x:x, default : Optional[Any] = None
+) -> Optional[Any]:
+    field_value = resource_value.get(field_name, default)
+    if field_value is not None: field_value = cast_func(field_value)
+    return field_value
+
+def get_bool(resource_value : Dict, field_name : str, default : Optional[Any] = None) -> bool:
+    return get_value(resource_value, field_name, cast_func=bool, default=default)
+
+def get_float(resource_value : Dict, field_name : str, default : Optional[Any] = None) -> float:
+    return get_value(resource_value, field_name, cast_func=float, default=default)
+
+def get_int(resource_value : Dict, field_name : str, default : Optional[Any] = None) -> int:
+    return get_value(resource_value, field_name, cast_func=int, default=default)
+
+def get_str(resource_value : Dict, field_name : str, default : Optional[Any] = None) -> str:
+    return get_value(resource_value, field_name, cast_func=str, default=default)
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py b/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..944dfa60781162d841c5da12813a4581c282471b
--- /dev/null
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py
@@ -0,0 +1,110 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, libyang, logging, os
+from typing import Dict, Optional
+
+YANG_BASE_PATH = os.path.join(os.path.dirname(__file__), '..', 'git', 'openconfig', 'public')
+YANG_SEARCH_PATHS = ':'.join([
+    os.path.join(YANG_BASE_PATH, 'release'),
+    os.path.join(YANG_BASE_PATH, 'third_party'),
+])
+
+YANG_MODULES = [
+    'iana-if-type',
+    'openconfig-bgp-types',
+    'openconfig-vlan-types',
+
+    'openconfig-interfaces',
+    'openconfig-if-8021x',
+    'openconfig-if-aggregate',
+    'openconfig-if-ethernet-ext',
+    'openconfig-if-ethernet',
+    'openconfig-if-ip-ext',
+    'openconfig-if-ip',
+    'openconfig-if-poe',
+    'openconfig-if-sdn-ext',
+    'openconfig-if-tunnel',
+
+    'openconfig-vlan',
+
+    'openconfig-types',
+    'openconfig-policy-types',
+    'openconfig-mpls-types',
+    'openconfig-network-instance-types',
+    'openconfig-network-instance',
+
+    'openconfig-platform',
+    'openconfig-platform-controller-card',
+    'openconfig-platform-cpu',
+    'openconfig-platform-ext',
+    'openconfig-platform-fabric',
+    'openconfig-platform-fan',
+    'openconfig-platform-integrated-circuit',
+    'openconfig-platform-linecard',
+    'openconfig-platform-pipeline-counters',
+    'openconfig-platform-port',
+    'openconfig-platform-psu',
+    'openconfig-platform-software',
+    'openconfig-platform-transceiver',
+    'openconfig-platform-types',
+]
+
+LOGGER = logging.getLogger(__name__)
+
+class YangHandler:
+    def __init__(self) -> None:
+        self._yang_context = libyang.Context(YANG_SEARCH_PATHS)
+        self._loaded_modules = set()
+        for yang_module_name in YANG_MODULES:
+            LOGGER.info('Loading module: {:s}'.format(str(yang_module_name)))
+            self._yang_context.load_module(yang_module_name).feature_enable_all()
+            self._loaded_modules.add(yang_module_name)
+        self._data_path_instances = dict()
+
+    def get_data_paths(self) -> Dict[str, libyang.DNode]:
+        return self._data_path_instances
+
+    def get_data_path(self, path : str) -> libyang.DNode:
+        data_path_instance = self._data_path_instances.get(path)
+        if data_path_instance is None:
+            data_path_instance = self._yang_context.create_data_path(path)
+            self._data_path_instances[path] = data_path_instance
+        return data_path_instance
+
+    def parse_to_dict(
+        self, request_path : str, json_data : Dict, fmt : str = 'json', strict : bool = True
+    ) -> Dict:
+        if fmt != 'json': raise Exception('Unsupported format: {:s}'.format(str(fmt)))
+        LOGGER.debug('request_path = {:s}'.format(str(request_path)))
+        LOGGER.debug('json_data = {:s}'.format(str(json_data)))
+        LOGGER.debug('format = {:s}'.format(str(fmt)))
+
+        parent_path_parts = list(filter(lambda s: len(s) > 0, request_path.split('/')))
+        for parent_path_part in reversed(parent_path_parts):
+            json_data = {parent_path_part: json_data}
+        str_data = json.dumps(json_data)
+
+        dnode : Optional[libyang.DNode] = self._yang_context.parse_data_mem(
+            str_data, fmt, strict=strict, parse_only=True, #validate_present=True, #validate=True,
+        )
+        if dnode is None: raise Exception('Unable to parse Data({:s})'.format(str(json_data)))
+
+        parsed = dnode.print_dict()
+        LOGGER.debug('parsed = {:s}'.format(json.dumps(parsed)))
+        dnode.free()
+        return parsed
+
+    def destroy(self) -> None:
+        self._yang_context.destroy()
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/_Handler.py b/src/device/service/drivers/gnmi_openconfig/handlers/_Handler.py
index f051c43534ab4e1d6c15a34ae2070067034ba9e1..215a4d499e534ca9797c487adb0c798487a4a7cf 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/_Handler.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/_Handler.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 from typing import Any, Dict, List, Tuple
+from .YangHandler import YangHandler
 
 class _Handler:
     def get_resource_key(self) -> str:
@@ -23,10 +24,14 @@ class _Handler:
         # Retrieve the OpenConfig path schema used to interrogate the device
         raise NotImplementedError()
 
-    def compose(self, resource_key : str, resource_value : Dict, delete : bool = False) -> Tuple[str, str]:
+    def compose(
+        self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False
+    ) -> Tuple[str, str]:
         # Compose a Set/Delete message based on the resource_key/resource_value fields, and the delete flag
         raise NotImplementedError()
 
-    def parse(self, json_data : Dict) -> List[Tuple[str, Dict[str, Any]]]:
+    def parse(
+        self, json_data : Dict, yang_handler : YangHandler
+    ) -> List[Tuple[str, Dict[str, Any]]]:
         # Parse a Reply from the device and return a list of resource_key/resource_value pairs
         raise NotImplementedError()
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py b/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py
index 4ff8e8b38b4a76ba9bb0d2b91fb70b54b06a170e..b36313bb2030ed9d07f1f3c198ea9447a0cafbaa 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Tuple, Union
 from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES
 from ._Handler import _Handler
 from .Component import ComponentHandler
@@ -21,8 +21,10 @@ from .Interface import InterfaceHandler
 from .InterfaceCounter import InterfaceCounterHandler
 from .NetworkInstance import NetworkInstanceHandler
 from .NetworkInstanceInterface import NetworkInstanceInterfaceHandler
+from .NetworkInstanceProtocol import NetworkInstanceProtocolHandler
 from .NetworkInstanceStaticRoute import NetworkInstanceStaticRouteHandler
 from .Tools import get_schema
+from .YangHandler import YangHandler
 
 LOGGER = logging.getLogger(__name__)
 
@@ -31,6 +33,7 @@ ifaceh = InterfaceHandler()
 ifctrh = InterfaceCounterHandler()
 nih    = NetworkInstanceHandler()
 niifh  = NetworkInstanceInterfaceHandler()
+niph   = NetworkInstanceProtocolHandler()
 nisrh  = NetworkInstanceStaticRouteHandler()
 
 ALL_RESOURCE_KEYS = [
@@ -46,9 +49,10 @@ RESOURCE_KEY_MAPPER = {
 }
 
 PATH_MAPPER = {
-    '/components'        : comph.get_path(),
-    '/interfaces'        : ifaceh.get_path(),
-    '/network-instances' : nih.get_path(),
+    '/components'           : comph.get_path(),
+    '/components/component' : comph.get_path(),
+    '/interfaces'           : ifaceh.get_path(),
+    '/network-instances'    : nih.get_path(),
 }
 
 RESOURCE_KEY_TO_HANDLER = {
@@ -57,6 +61,7 @@ RESOURCE_KEY_TO_HANDLER = {
     ifctrh.get_resource_key() : ifctrh,
     nih.get_resource_key()    : nih,
     niifh.get_resource_key()  : niifh,
+    niph.get_resource_key()   : niph,
     nisrh.get_resource_key()  : nisrh,
 }
 
@@ -66,11 +71,13 @@ PATH_TO_HANDLER = {
     ifctrh.get_path() : ifctrh,
     nih.get_path()    : nih,
     niifh.get_path()  : niifh,
+    niph.get_path()   : niph,
     nisrh.get_path()  : nisrh,
 }
 
 def get_handler(
-    resource_key : Optional[str] = None, path : Optional[str] = None, raise_if_not_found=True
+    resource_key : Optional[str] = None, path : Optional[str] = None,
+    raise_if_not_found=True
 ) -> Optional[_Handler]:
     if (resource_key is None) == (path is None):
         MSG = 'Exactly one of resource_key({:s}) or path({:s}) must be specified'
@@ -88,16 +95,24 @@ def get_handler(
         path_schema = PATH_MAPPER.get(path_schema, path_schema)
         handler = PATH_TO_HANDLER.get(path_schema)
         if handler is None and raise_if_not_found:
-            MSG = 'Handler not found: resource_key={:s} resource_key_schema={:s}'
+            MSG = 'Handler not found: path={:s} path_schema={:s}'
             # pylint: disable=broad-exception-raised
-            raise Exception(MSG.format(str(resource_key), str(resource_key_schema)))
+            raise Exception(MSG.format(str(path), str(path_schema)))
     return handler
 
 def get_path(resource_key : str) -> str:
-    return get_handler(resource_key=resource_key).get_path()
+    handler = get_handler(resource_key=resource_key)
+    return handler.get_path()
 
-def parse(str_path : str, value : Union[Dict, List]):
-    return get_handler(path=str_path).parse(value)
+def parse(
+    str_path : str, value : Union[Dict, List], yang_handler : YangHandler
+) -> List[Tuple[str, Dict[str, Any]]]:
+    handler = get_handler(path=str_path)
+    return handler.parse(value, yang_handler)
 
-def compose(resource_key : str, resource_value : Union[Dict, List], delete : bool = False) -> Tuple[str, str]:
-    return get_handler(resource_key=resource_key).compose(resource_key, resource_value, delete=delete)
+def compose(
+    resource_key : str, resource_value : Union[Dict, List],
+    yang_handler : YangHandler, delete : bool = False
+) -> Tuple[str, str]:
+    handler = get_handler(resource_key=resource_key)
+    return handler.compose(resource_key, resource_value, yang_handler, delete=delete)
diff --git a/src/device/service/drivers/gnmi_openconfig/tools/Capabilities.py b/src/device/service/drivers/gnmi_openconfig/tools/Capabilities.py
index 093a96233f26399be4e224a48a78ea6329da4304..66b30da9832e661b35e1f13b3e3a47c7a50098cb 100644
--- a/src/device/service/drivers/gnmi_openconfig/tools/Capabilities.py
+++ b/src/device/service/drivers/gnmi_openconfig/tools/Capabilities.py
@@ -17,7 +17,7 @@ from common.tools.grpc.Tools import grpc_message_to_json
 from ..gnmi.gnmi_pb2 import CapabilityRequest   # pylint: disable=no-name-in-module
 from ..gnmi.gnmi_pb2_grpc import gNMIStub
 
-def get_supported_encodings(
+def check_capabilities(
     stub : gNMIStub, username : str, password : str, timeout : Optional[int] = None
-) -> Set[Union[str, int]]:
+) -> None:
     metadata = [('username', username), ('password', password)]
@@ -25,6 +25,17 @@ def get_supported_encodings(
     reply = stub.Capabilities(req, metadata=metadata, timeout=timeout)
 
     data = grpc_message_to_json(reply)
+
+    gnmi_version = data.get('gNMI_version')
+    if gnmi_version is None or gnmi_version != '0.7.0':
+        raise Exception('Unsupported gNMI version: {:s}'.format(str(gnmi_version)))
+
+    #supported_models = {
+    #    supported_model['name']: supported_model['version']
+    #    for supported_model in data.get('supported_models', [])
+    #}
+    # TODO: check supported models and versions
+
     supported_encodings = {
         supported_encoding
         for supported_encoding in data.get('supported_encodings', [])
@@ -33,4 +44,6 @@ def get_supported_encodings(
     if len(supported_encodings) == 0:
         # pylint: disable=broad-exception-raised
         raise Exception('No supported encodings found')
-    return supported_encodings
+    if 'JSON_IETF' not in supported_encodings:
+        # pylint: disable=broad-exception-raised
+        raise Exception('JSON_IETF encoding not supported')
diff --git a/src/device/service/drivers/gnmi_openconfig/tools/Path.py b/src/device/service/drivers/gnmi_openconfig/tools/Path.py
index 7ce0631dada58ce581900c974a0b24d170df2f39..1955fc4d94eff0106d324a067809f93151da5699 100644
--- a/src/device/service/drivers/gnmi_openconfig/tools/Path.py
+++ b/src/device/service/drivers/gnmi_openconfig/tools/Path.py
@@ -19,8 +19,8 @@ from ..gnmi.gnmi_pb2 import Path, PathElem
 RE_PATH_SPLIT = re.compile(r'/(?=(?:[^\[\]]|\[[^\[\]]+\])*$)')
 RE_PATH_KEYS = re.compile(r'\[(.*?)\]')
 
-def path_from_string(path='/'):
-    if not path: return Path(elem=[])
+def path_from_string(path='/'): #, origin='openconfig'
+    if not path: return Path(elem=[]) #, origin=origin
 
     if path[0] == '/':
         if path[-1] == '/':
@@ -40,7 +40,7 @@ def path_from_string(path='/'):
         dict_keys = dict(x.split('=', 1) for x in elem_keys)
         path.append(PathElem(name=elem_name, key=dict_keys))
 
-    return Path(elem=path)
+    return Path(elem=path) #, origin=origin
 
 def path_to_string(path : Path) -> str:
     path_parts = list()
diff --git a/src/device/service/drivers/gnmi_openconfig/tools/Value.py b/src/device/service/drivers/gnmi_openconfig/tools/Value.py
index 077bdd40ec8859202b8e8e6053b731339edfe7fe..325dacd51b8b1157ea63ff71eed1f9a758d56c3b 100644
--- a/src/device/service/drivers/gnmi_openconfig/tools/Value.py
+++ b/src/device/service/drivers/gnmi_openconfig/tools/Value.py
@@ -13,9 +13,36 @@
 # limitations under the License.
 
 import base64, json
-from typing import Any
+from typing import Any, Dict, List, Union
 from ..gnmi.gnmi_pb2 import TypedValue
 
+REMOVE_NAMESPACES = (
+    'arista-intf-augments',
+    'arista-netinst-augments',
+    'openconfig-hercules-platform',
+)
+
+def remove_fields(key : str) -> bool:
+    parts = key.split(':')
+    if len(parts) == 1: return False
+    namespace = parts[0].lower()
+    return namespace in REMOVE_NAMESPACES
+
+def recursive_remove_keys(container : Union[Dict, List, Any]) -> None:
+    if isinstance(container, dict):
+        remove_keys = [
+            key
+            for key in container.keys()
+            if remove_fields(key)
+        ]
+        for key in remove_keys:
+            container.pop(key, None)
+        for value in container.values():
+            recursive_remove_keys(value)
+    elif isinstance(container, list):
+        for value in container:
+            recursive_remove_keys(value)
+
 def decode_value(value : TypedValue) -> Any:
     encoding = value.WhichOneof('value')
     if encoding == 'json_val':
@@ -31,9 +58,13 @@ def decode_value(value : TypedValue) -> Any:
         raise NotImplementedError()
         #return value
     elif encoding == 'json_ietf_val':
-        value : str = value.json_ietf_val
+        str_value : str = value.json_ietf_val.decode('UTF-8')
         try:
-            return json.loads(value)
+            # Cleanup and normalize the records according to OpenConfig
+            #str_value = str_value.replace('openconfig-platform-types:', 'oc-platform-types:')
+            json_value = json.loads(str_value)
+            recursive_remove_keys(json_value)
+            return json_value
         except json.decoder.JSONDecodeError:
             # Assume is Base64-encoded
-            b_b64_value = value.encode('UTF-8')
+            b_b64_value = str_value.encode('UTF-8')
diff --git a/src/device/tests/gnmi_openconfig/__init__.py b/src/device/tests/gnmi_openconfig/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/device/tests/gnmi_openconfig/storage/Storage.py b/src/device/tests/gnmi_openconfig/storage/Storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba3596be791e0bc8a473596e6c238eb87fe21f89
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/storage/Storage.py
@@ -0,0 +1,23 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .StorageEndpoints import StorageEndpoints
+from .StorageInterface import StorageInterface
+from .StorageNetworkInstance import StorageNetworkInstance
+
+class Storage:
+    def __init__(self) -> None:
+        self.endpoints         = StorageEndpoints()
+        self.interfaces        = StorageInterface()
+        self.network_instances = StorageNetworkInstance()
diff --git a/src/device/tests/gnmi_openconfig/storage/StorageEndpoints.py b/src/device/tests/gnmi_openconfig/storage/StorageEndpoints.py
new file mode 100644
index 0000000000000000000000000000000000000000..9876c8019317e33e58a64a86a477ea06c1ce76a9
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/storage/StorageEndpoints.py
@@ -0,0 +1,75 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from typing import Dict, List, Tuple
+from .Tools import compose_resources
+
+RE_RESKEY_ENDPOINT = re.compile(r'^\/endpoints\/endpoint\[([^\]]+)\]$')
+
+ENDPOINT_PACKET_SAMPLE_TYPES : Dict[int, str] = {
+    101: '/openconfig-interfaces:interfaces/interface[name={:s}]/state/counters/out-pkts',
+    102: '/openconfig-interfaces:interfaces/interface[name={:s}]/state/counters/in-pkts',
+    201: '/openconfig-interfaces:interfaces/interface[name={:s}]/state/counters/out-octets',
+    202: '/openconfig-interfaces:interfaces/interface[name={:s}]/state/counters/in-octets',
+}
+
+class Endpoints:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/endpoints/endpoint[{:s}]', ['uuid', 'type', 'sample_types']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[str, Dict] = dict()
+
+    def add(self, ep_uuid : str, resource_value : Dict) -> None:
+        item = self._items.setdefault(ep_uuid, dict())
+        item['uuid'] = ep_uuid
+
+        for _, field_names in Endpoints.STRUCT:
+            field_names = set(field_names)
+            item.update({k:v for k,v in resource_value.items() if k in field_names})
+
+        item['sample_types'] = {
+            sample_type_id : sample_type_path.format(ep_uuid)
+            for sample_type_id, sample_type_path in ENDPOINT_PACKET_SAMPLE_TYPES.items()
+        }
+
+    def get(self, ep_uuid : str) -> Dict:
+        return self._items.get(ep_uuid)
+
+    def remove(self, ep_uuid : str) -> None:
+        self._items.pop(ep_uuid, None)
+    
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, Endpoints.STRUCT)
+
+class StorageEndpoints:
+    def __init__(self) -> None:
+        self.endpoints = Endpoints()
+
+    def populate(self, resources : List[Tuple[str, Dict]]) -> None:
+        for resource_key, resource_value in resources:
+            match = RE_RESKEY_ENDPOINT.match(resource_key)
+            if match is not None:
+                self.endpoints.add(match.group(1), resource_value)
+                continue
+
+            MSG = 'Unhandled Resource Key: {:s} => {:s}'
+            raise Exception(MSG.format(str(resource_key), str(resource_value)))
+
+    def get_expected_config(self) -> List[Tuple[str, Dict]]:
+        expected_config = list()
+        expected_config.extend(self.endpoints.compose_resources())
+        return expected_config
diff --git a/src/device/tests/gnmi_openconfig/storage/StorageInterface copy.py b/src/device/tests/gnmi_openconfig/storage/StorageInterface copy.py
new file mode 100644
index 0000000000000000000000000000000000000000..1929ced36b9c597656d4f6022616c78c78539a6a
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/storage/StorageInterface copy.py	
@@ -0,0 +1,134 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from typing import Dict, List, Tuple
+from .Tools import compose_resources
+
+PREFIX = r'^\/interface\[([^\]]+)\]'
+RE_RESKEY_INTERFACE    = re.compile(PREFIX + r'$')
+RE_RESKEY_ETHERNET     = re.compile(PREFIX + r'\/ethernet$')
+RE_RESKEY_SUBINTERFACE = re.compile(PREFIX + r'\/subinterface\[([^\]]+)\]$')
+#RE_RESKEY_IPV4_ADDRESS = re.compile(PREFIX + r'\/subinterface\[([^\]]+)\]\/ipv4\[([^\]]+)\]$')
+
+class Interfaces:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/interface[{:s}]', ['name', 'type', 'admin-status', 'oper-status', 'management', 'mtu', 'ifindex',
+                              'hardware-port', 'transceiver']),
+        ('/interface[{:s}]/ethernet', ['port-speed', 'negotiated-port-speed', 'mac-address', 'hw-mac-address']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[str, Dict] = dict()
+
+    def add(self, if_name : str, resource_value : Dict) -> None:
+        item = self._items.setdefault(if_name, dict())
+        item['name'] = if_name
+        for _, field_names in Interfaces.STRUCT:
+            field_names = set(field_names)
+            item.update({k:v for k,v in resource_value.items() if k in field_names})
+
+    def get(self, if_name : str) -> Dict:
+        return self._items.get(if_name)
+
+    def remove(self, if_name : str) -> None:
+        self._items.pop(if_name, None)
+    
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, Interfaces.STRUCT)
+
+class SubInterfaces:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/interface[{:s}]/subinterface[{:d}]', ['index']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[Tuple[str, int], Dict] = dict()
+
+    def add(self, if_name : str, subif_index : int) -> None:
+        item = self._items.setdefault((if_name, subif_index), dict())
+        item['index'] = subif_index
+
+    def get(self, if_name : str, subif_index : int) -> Dict:
+        return self._items.get((if_name, subif_index))
+
+    def remove(self, if_name : str, subif_index : int) -> None:
+        self._items.pop((if_name, subif_index), None)
+    
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, SubInterfaces.STRUCT)
+
+class IPv4Addresses:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/interface[{:s}]/subinterface[{:d}]', ['index', 'address_ip', 'address_prefix', 'origin']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[Tuple[str, int], Dict] = dict()
+
+    def add(self, if_name : str, subif_index : int, ipv4_address : str, resource_value : Dict) -> None:
+        item = self._items.setdefault((if_name, subif_index), dict())
+        item['index'         ] = subif_index
+        item['address_ip'    ] = ipv4_address
+        item['origin'        ] = resource_value.get('origin')
+        item['address_prefix'] = resource_value.get('prefix')
+
+    def get(self, if_name : str, subif_index : int, ipv4_address : str) -> Dict:
+        return self._items.get((if_name, subif_index))
+
+    def remove(self, if_name : str, subif_index : int, ipv4_address : str) -> None:
+        self._items.pop((if_name, subif_index), None)
+
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, IPv4Addresses.STRUCT)
+
+class StorageInterface:
+    def __init__(self) -> None:
+        self.interfaces     = Interfaces()
+        self.subinterfaces  = SubInterfaces()
+        self.ipv4_addresses = IPv4Addresses()
+
+    def populate(self, resources : List[Tuple[str, Dict]]) -> None:
+        for resource_key, resource_value in resources:
+            match = RE_RESKEY_INTERFACE.match(resource_key)
+            if match is not None:
+                self.interfaces.add(match.group(1), resource_value)
+                continue
+
+            match = RE_RESKEY_ETHERNET.match(resource_key)
+            if match is not None:
+                self.interfaces.add(match.group(1), resource_value)
+                continue
+
+            match = RE_RESKEY_SUBINTERFACE.match(resource_key)
+            if match is not None:
+                self.subinterfaces.add(match.group(1), int(match.group(2)))
+                address_ip = resource_value.get('address_ip')
+                self.ipv4_addresses.add(match.group(1), int(match.group(2)), address_ip, resource_value)
+                continue
+
+            #match = RE_RESKEY_IPV4_ADDRESS.match(resource_key)
+            #if match is not None:
+            #    self.ipv4_addresses.add(match.group(1), int(match.group(2)), match.group(3), resource_value)
+            #    continue
+
+            MSG = 'Unhandled Resource Key: {:s} => {:s}'
+            raise Exception(MSG.format(str(resource_key), str(resource_value)))
+
+    def get_expected_config(self) -> List[Tuple[str, Dict]]:
+        expected_config = list()
+        expected_config.extend(self.interfaces.compose_resources())
+        #expected_config.extend(self.subinterfaces.compose_resources())
+        expected_config.extend(self.ipv4_addresses.compose_resources())
+        return expected_config
diff --git a/src/device/tests/gnmi_openconfig/storage/StorageInterface.py b/src/device/tests/gnmi_openconfig/storage/StorageInterface.py
new file mode 100644
index 0000000000000000000000000000000000000000..f07677c123048774326d61c88df61e66c254b612
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/storage/StorageInterface.py
@@ -0,0 +1,131 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from typing import Dict, List, Tuple
+from .Tools import compose_resources
+
+PREFIX = r'^\/interface\[([^\]]+)\]'
+RE_RESKEY_INTERFACE    = re.compile(PREFIX + r'$')
+RE_RESKEY_ETHERNET     = re.compile(PREFIX + r'\/ethernet$')
+RE_RESKEY_SUBINTERFACE = re.compile(PREFIX + r'\/subinterface\[([^\]]+)\]$')
+RE_RESKEY_IPV4_ADDRESS = re.compile(PREFIX + r'\/subinterface\[([^\]]+)\]\/ipv4\[([^\]]+)\]$')
+
+class Interfaces:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/interface[{:s}]', ['name', 'type', 'admin-status', 'oper-status', 'management', 'mtu', 'ifindex',
+                              'hardware-port', 'transceiver']),
+        ('/interface[{:s}]/ethernet', ['port-speed', 'negotiated-port-speed', 'mac-address', 'hw-mac-address']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[str, Dict] = dict()
+
+    def add(self, if_name : str, resource_value : Dict) -> None:
+        item = self._items.setdefault(if_name, dict())
+        item['name'] = if_name
+        for _, field_names in Interfaces.STRUCT:
+            field_names = set(field_names)
+            item.update({k:v for k,v in resource_value.items() if k in field_names})
+
+    def get(self, if_name : str) -> Dict:
+        return self._items.get(if_name)
+
+    def remove(self, if_name : str) -> None:
+        self._items.pop(if_name, None)
+    
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, Interfaces.STRUCT)
+
+class SubInterfaces:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/interface[{:s}]/subinterface[{:d}]', ['index']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[Tuple[str, int], Dict] = dict()
+
+    def add(self, if_name : str, subif_index : int) -> None:
+        item = self._items.setdefault((if_name, subif_index), dict())
+        item['index'] = subif_index
+
+    def get(self, if_name : str, subif_index : int) -> Dict:
+        return self._items.get((if_name, subif_index))
+
+    def remove(self, if_name : str, subif_index : int) -> None:
+        self._items.pop((if_name, subif_index), None)
+    
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, SubInterfaces.STRUCT)
+
+class IPv4Addresses:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/interface[{:s}]/subinterface[{:d}]/ipv4[{:s}]', ['ip', 'origin', 'prefix']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[Tuple[str, int, str], Dict] = dict()
+
+    def add(self, if_name : str, subif_index : int, ipv4_address : str, resource_value : Dict) -> None:
+        item = self._items.setdefault((if_name, subif_index, ipv4_address), dict())
+        item['ip'    ] = ipv4_address
+        item['origin'] = resource_value.get('origin')
+        item['prefix'] = resource_value.get('prefix')
+
+    def get(self, if_name : str, subif_index : int, ipv4_address : str) -> Dict:
+        return self._items.get((if_name, subif_index, ipv4_address))
+
+    def remove(self, if_name : str, subif_index : int, ipv4_address : str) -> None:
+        self._items.pop((if_name, subif_index, ipv4_address), None)
+
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, IPv4Addresses.STRUCT)
+
+class StorageInterface:
+    def __init__(self) -> None:
+        self.interfaces     = Interfaces()
+        self.subinterfaces  = SubInterfaces()
+        self.ipv4_addresses = IPv4Addresses()
+
+    def populate(self, resources : List[Tuple[str, Dict]]) -> None:
+        for resource_key, resource_value in resources:
+            match = RE_RESKEY_INTERFACE.match(resource_key)
+            if match is not None:
+                self.interfaces.add(match.group(1), resource_value)
+                continue
+
+            match = RE_RESKEY_ETHERNET.match(resource_key)
+            if match is not None:
+                self.interfaces.add(match.group(1), resource_value)
+                continue
+
+            match = RE_RESKEY_SUBINTERFACE.match(resource_key)
+            if match is not None:
+                self.subinterfaces.add(match.group(1), int(match.group(2)))
+                continue
+
+            match = RE_RESKEY_IPV4_ADDRESS.match(resource_key)
+            if match is not None:
+                self.ipv4_addresses.add(match.group(1), int(match.group(2)), match.group(3), resource_value)
+                continue
+
+            MSG = 'Unhandled Resource Key: {:s} => {:s}'
+            raise Exception(MSG.format(str(resource_key), str(resource_value)))
+
+    def get_expected_config(self) -> List[Tuple[str, Dict]]:
+        expected_config = list()
+        expected_config.extend(self.interfaces.compose_resources())
+        expected_config.extend(self.subinterfaces.compose_resources())
+        expected_config.extend(self.ipv4_addresses.compose_resources())
+        return expected_config
diff --git a/src/device/tests/gnmi_openconfig/storage/StorageNetworkInstance.py b/src/device/tests/gnmi_openconfig/storage/StorageNetworkInstance.py
new file mode 100644
index 0000000000000000000000000000000000000000..642099aba80542dd93a504c11778a91be615799f
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/storage/StorageNetworkInstance.py
@@ -0,0 +1,218 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from typing import Dict, List, Tuple
+from .Tools import compose_resources
+
+PREFIX = r'^\/network\_instance\[([^\]]+)\]'
+RE_RESKEY_NET_INST     = re.compile(PREFIX + r'$')
+RE_RESKEY_INTERFACE    = re.compile(PREFIX + r'\/interface\[([^\]]+)\]$')
+RE_RESKEY_PROTOCOL     = re.compile(PREFIX + r'\/protocol\[([^\]]+)\]$')
+RE_RESKEY_PROTO_STATIC = re.compile(PREFIX + r'\/protocol\[([^\]]+)\]\/static\_routes\[([^\]]+)\]$')
+RE_RESKEY_TABLE        = re.compile(PREFIX + r'\/table\[([^\,]+)\,([^\]]+)\]$')
+RE_RESKEY_VLAN         = re.compile(PREFIX + r'\/vlan\[([^\]]+)\]$')
+
+class NetworkInstances:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/network_instance[{:s}]', ['name', 'type']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[str, Dict] = dict()
+
+    def add(self, ni_name : str, resource_value : Dict) -> None:
+        item = self._items.setdefault(ni_name, dict())
+        item['name'] = ni_name
+        item['type'] = resource_value.get('type')
+
+    def get(self, ni_name : str) -> Dict:
+        return self._items.get(ni_name)
+
+    def remove(self, ni_name : str) -> None:
+        self._items.pop(ni_name, None)
+    
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, NetworkInstances.STRUCT)
+
+class Interfaces:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/network_instance[{:s}]/interface[{:s}.{:d}]', ['name', 'id', 'if_name', 'sif_index']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[Tuple[str, str, int], Dict] = dict()
+
+    def add(self, ni_name : str, if_name : str, sif_index : int) -> None:
+        item = self._items.setdefault((ni_name, if_name, sif_index), dict())
+        item['name'     ] = ni_name
+        item['id'       ] = '{:s}.{:d}'.format(if_name, sif_index)
+        item['if_name'  ] = if_name
+        item['sif_index'] = sif_index
+
+    def get(self, ni_name : str, if_name : str, sif_index : int) -> Dict:
+        return self._items.get((ni_name, if_name, sif_index))
+
+    def remove(self, ni_name : str, if_name : str, sif_index : int) -> None:
+        self._items.pop((ni_name, if_name, sif_index), None)
+
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, Interfaces.STRUCT)
+
+class Protocols:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/network_instance[{:s}]/protocol[{:s}]', ['id', 'name']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[Tuple[str, str], Dict] = dict()
+
+    def add(self, ni_name : str, protocol : str) -> None:
+        item = self._items.setdefault((ni_name, protocol), dict())
+        item['id'  ] = protocol
+        item['name'] = protocol
+
+    def get(self, ni_name : str, protocol : str) -> Dict:
+        return self._items.get((ni_name, protocol))
+
+    def remove(self, ni_name : str, protocol : str) -> None:
+        self._items.pop((ni_name, protocol), None)
+
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, Protocols.STRUCT)
+
+class StaticRoutes:
+    STRUCT : List[Tuple[str, List[str]]] = [
+        ('/network_instance[{:s}]/protocol[{:s}]/static_routes[{:s}]', ['prefix', 'next_hops']),
+    ]
+
+    def __init__(self) -> None:
+        self._items : Dict[Tuple[str, str, str], Dict] = dict()
+
+    def add(self, ni_name : str, protocol : str, prefix : str, resource_value : Dict) -> None:
+        item = self._items.setdefault((ni_name, protocol, prefix), dict())
+        item['prefix'   ] = prefix
+        item['next_hops'] = resource_value.get('next_hops')
+
+    def get(self, ni_name : str, protocol : str, prefix : str) -> Dict:
+        return self._items.get((ni_name, protocol, prefix))
+
+    def remove(self, ni_name : str, protocol : str, prefix : str) -> None:
+        self._items.pop((ni_name, protocol, prefix), None)
+
+    def compose_resources(self) -> List[Dict]:
+        return compose_resources(self._items, StaticRoutes.STRUCT)
+
class Tables:
    """In-memory record of routing tables, keyed by (network instance, protocol, address family)."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/network_instance[{:s}]/table[{:s},{:s}]', ['protocol', 'address_family']),
    ]

    def __init__(self) -> None:
        # Maps (ni_name, protocol, address_family) -> {'protocol', 'address_family'}
        self._items : Dict[Tuple[str, str, str], Dict] = {}

    def add(self, ni_name : str, protocol : str, address_family : str) -> None:
        """Create or overwrite the table entry; idempotent."""
        key = (ni_name, protocol, address_family)
        self._items.setdefault(key, {}).update({
            'protocol'      : protocol,
            'address_family': address_family,
        })

    def get(self, ni_name : str, protocol : str, address_family : str) -> Dict:
        """Return the stored entry, or None when absent."""
        return self._items.get((ni_name, protocol, address_family))

    def remove(self, ni_name : str, protocol : str, address_family : str) -> None:
        """Drop the entry if present; unknown keys are ignored."""
        self._items.pop((ni_name, protocol, address_family), None)

    def compose_resources(self) -> List[Dict]:
        """Expand stored entries into (resource_key, resource_value) pairs."""
        return compose_resources(self._items, Tables.STRUCT)
+
class Vlans:
    """In-memory record of VLANs, keyed by (network instance, vlan id)."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/network_instance[{:s}]/vlan[{:d}]', ['vlan_id', 'name', 'members']),
    ]

    def __init__(self) -> None:
        # Maps (ni_name, vlan_id) -> {'vlan_id', 'name', 'members'}
        self._items : Dict[Tuple[int, int], Dict] = dict()

    def add(self, ni_name : str, vlan_id : int, resource_value : Dict) -> None:
        """Create or overwrite the VLAN entry.

        'members' is stored sorted so comparisons against retrieved device
        config are order-independent. A missing 'members' key now defaults to
        an empty list; previously sorted(None) raised TypeError. This matches
        callers that later setdefault('members', list()) on the entry.
        """
        item = self._items.setdefault((ni_name, vlan_id), dict())
        item['vlan_id'] = vlan_id
        item['name'   ] = resource_value.get('name')
        item['members'] = sorted(resource_value.get('members', []))

    def get(self, ni_name : str, vlan_id : int) -> Dict:
        """Return the stored entry, or None when absent."""
        return self._items.get((ni_name, vlan_id))

    def remove(self, ni_name : str, vlan_id : int) -> None:
        """Drop the entry if present; unknown keys are ignored."""
        self._items.pop((ni_name, vlan_id), None)

    def compose_resources(self) -> List[Dict]:
        """Expand stored entries into (resource_key, resource_value) pairs."""
        return compose_resources(self._items, Vlans.STRUCT)
+
class StorageNetworkInstance:
    """Aggregated expected state for network instances: the instances
    themselves plus their bound interfaces, protocols, static routes, routing
    tables and VLANs."""

    def __init__(self) -> None:
        # Sub-collections; each knows how to expand itself back into
        # (resource_key, resource_value) pairs via compose_resources().
        self.network_instances = NetworkInstances()
        self.interfaces        = Interfaces()
        self.protocols         = Protocols()
        self.protocol_static   = StaticRoutes()
        self.tables            = Tables()
        self.vlans             = Vlans()

    def populate(self, resources : List[Tuple[str, Dict]]) -> None:
        """Classify each (resource_key, resource_value) pair by matching the
        key against the RE_RESKEY_* patterns (defined above, out of this view)
        and store it in the matching sub-collection.

        Raises:
            Exception: when a resource key matches none of the patterns.
        """
        for resource_key, resource_value in resources:
            match = RE_RESKEY_NET_INST.match(resource_key)
            if match is not None:
                self.network_instances.add(match.group(1), resource_value)
                continue

            match = RE_RESKEY_INTERFACE.match(resource_key)
            if match is not None:
                # Interface ids look like '<name>.<subif_index>'; a missing
                # subinterface index defaults to 0.
                if_id = match.group(2)
                if_id_parts = if_id.split('.')
                if_name = if_id_parts[0]
                sif_index = 0 if len(if_id_parts) == 1 else int(if_id_parts[1])
                self.interfaces.add(match.group(1), if_name, sif_index)
                continue

            match = RE_RESKEY_PROTOCOL.match(resource_key)
            if match is not None:
                self.protocols.add(match.group(1), match.group(2))
                continue

            match = RE_RESKEY_PROTO_STATIC.match(resource_key)
            if match is not None:
                self.protocol_static.add(match.group(1), match.group(2), match.group(3), resource_value)
                continue

            match = RE_RESKEY_TABLE.match(resource_key)
            if match is not None:
                self.tables.add(match.group(1), match.group(2), match.group(3))
                continue

            match = RE_RESKEY_VLAN.match(resource_key)
            if match is not None:
                self.vlans.add(match.group(1), int(match.group(2)), resource_value)
                continue

            MSG = 'Unhandled Resource Key: {:s} => {:s}'
            raise Exception(MSG.format(str(resource_key), str(resource_value)))

    def get_expected_config(self) -> List[Tuple[str, Dict]]:
        """Flatten all sub-collections back into the list of
        (resource_key, resource_value) pairs the device is expected to report."""
        expected_config = list()
        expected_config.extend(self.network_instances.compose_resources())
        expected_config.extend(self.interfaces.compose_resources())
        expected_config.extend(self.protocols.compose_resources())
        expected_config.extend(self.protocol_static.compose_resources())
        expected_config.extend(self.tables.compose_resources())
        expected_config.extend(self.vlans.compose_resources())
        return expected_config
diff --git a/src/device/tests/gnmi_openconfig/storage/Tools.py b/src/device/tests/gnmi_openconfig/storage/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e26417d98ccd40ec9685c56de253d0a414475e3
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/storage/Tools.py
@@ -0,0 +1,33 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, List, Tuple
+
def compose_resources(
    storage : Dict[Tuple, Dict], config_struct : List[Tuple[str, List[str]]]
) -> List[Tuple[str, Dict]]:
    """Expand stored items into (resource_key, resource_value) pairs.

    Args:
        storage: maps key fields (a tuple, or a bare scalar for single-field
            keys) to the stored value dict for that item.
        config_struct: list of (resource-key template, value field names);
            each template is expanded with the key fields, and the value dict
            is filtered down to the listed field names.

    Returns:
        One (resource_key, resource_value) pair per stored item per template.
        Fields that are missing or None are omitted from the value dict.

    Note: the return annotation was corrected to List[Tuple[str, Dict]] — the
    function appends 2-tuples, not bare dicts.
    """
    expected_config = list()

    for resource_key_fields, resource_value_data in storage.items():
        # Normalize scalar keys to 1-tuples once, before template expansion
        # (the original re-checked this inside the inner loop and rebound the
        # loop variable, which obscured the invariant).
        if isinstance(resource_key_fields, (str, int, float, bool)):
            resource_key_fields = (resource_key_fields,)
        for resource_key_template, resource_key_field_names in config_struct:
            resource_key = resource_key_template.format(*resource_key_fields)
            resource_value = {
                field_name : resource_value_data[field_name]
                for field_name in resource_key_field_names
                if resource_value_data.get(field_name) is not None
            }
            expected_config.append((resource_key, resource_value))

    return expected_config
diff --git a/src/device/tests/gnmi_openconfig/storage/__init__.py b/src/device/tests/gnmi_openconfig/storage/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/storage/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..b09d7186b684d15172c25714d2df6458442f9e80
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py
@@ -0,0 +1,576 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, os, pytest, time
+from typing import Dict, Tuple
+os.environ['DEVICE_EMULATED_ONLY'] = 'YES'
+
+# pylint: disable=wrong-import-position
+from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver
+#from device.service.driver_api._Driver import (
+#    RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES
+#)
+
+logging.basicConfig(level=logging.DEBUG)
+#logging.getLogger('ncclient.operations.rpc').setLevel(logging.INFO)
+#logging.getLogger('ncclient.transport.parser').setLevel(logging.INFO)
+
+LOGGER = logging.getLogger(__name__)
+
+
+##### DRIVERS FIXTURE ##################################################################################################
+
# Device inventory consumed by the `drivers` fixture. Keys MUST match the
# names indexed in test_configure() below ('CSGW1'/'CSGW2'); the original
# 'SW1'/'SW2' keys made drivers['CSGW1'] raise KeyError at test time.
# NOTE(review): these settings (hostkey_verify, manager_params, ...) look like
# NETCONF/ncclient parameters although this module imports the gNMI driver —
# confirm which driver/settings combination is intended.
DEVICES = {
    'CSGW1': {'address': '172.20.20.101', 'port': 6030, 'settings': {
        'username': 'admin', 'password': 'admin',
        'vendor': None, 'force_running': False, 'hostkey_verify': False, 'look_for_keys': False, 'allow_agent': False,
        'commit_per_rule': True, 'device_params': {'name': 'default'}, 'manager_params': {'timeout' : 120}
    }},
    'CSGW2': {'address': '10.1.1.87', 'port': 830, 'settings': {
        'username': 'ocnos', 'password': 'ocnos',
        'vendor': None, 'force_running': False, 'hostkey_verify': False, 'look_for_keys': False, 'allow_agent': False,
        'commit_per_rule': True, 'device_params': {'name': 'default'}, 'manager_params': {'timeout' : 120}
    }},
}
+
@pytest.fixture(scope='session')
def drivers() -> Dict[str, GnmiOpenConfigDriver]:
    """Session-scoped fixture: connect one driver per entry in DEVICES and
    disconnect them all at teardown.

    NOTE(review): the original referenced `OpenConfigDriver`, which is never
    imported in this module (only GnmiOpenConfigDriver is) and raised
    NameError at import time. Confirm the NETCONF-style settings in DEVICES
    are accepted by the gNMI driver's constructor.
    """
    _drivers : Dict[str, GnmiOpenConfigDriver] = dict()
    for device_name, driver_params in DEVICES.items():
        driver = GnmiOpenConfigDriver(driver_params['address'], driver_params['port'], **(driver_params['settings']))
        driver.Connect()
        _drivers[device_name] = driver
    yield _drivers
    time.sleep(1)   # let in-flight operations settle before disconnecting
    for _, driver in _drivers.items():
        driver.Disconnect()
+
+
def network_instance(ni_name, ni_type, ni_router_id=None, ni_route_distinguisher=None) -> Tuple[str, str]:
    """Compose the (resource_key, value) pair for a network instance.

    The value is JSON-encoded, hence the corrected Tuple[str, str] annotation
    (the original said Dict). router_id / route_distinguisher are included
    only when provided.
    """
    path = '/network_instance[{:s}]'.format(ni_name)
    data = {'name': ni_name, 'type': ni_type}
    if ni_router_id is not None: data['router_id'] = ni_router_id
    if ni_route_distinguisher is not None: data['route_distinguisher'] = ni_route_distinguisher
    return path, json.dumps(data)
+
def network_instance_add_protocol_bgp(ni_name, ni_type, ni_router_id, ni_bgp_as, neighbors=None) -> Tuple[str, str]:
    """Compose the (resource_key, json_value) pair enabling BGP on a network
    instance.

    Args:
        ni_name, ni_type: network instance name and type.
        ni_router_id: BGP router id.
        ni_bgp_as: autonomous-system number (also used as protocol_name).
        neighbors: optional iterable of (ip_address, remote_as) pairs.

    The value is JSON-encoded, hence Tuple[str, str]. The mutable default
    argument (neighbors=[]) was replaced by the safe None idiom; behavior for
    existing callers is unchanged.
    """
    path = '/network_instance[{:s}]/protocols[BGP]'.format(ni_name)
    data = {
        'name': ni_name, 'type': ni_type, 'router_id': ni_router_id, 'identifier': 'BGP',
        'protocol_name': ni_bgp_as, 'as': ni_bgp_as
    }
    if neighbors:
        data['neighbors'] = [
            {'ip_address': neighbor_ip_address, 'remote_as': neighbor_remote_as}
            for neighbor_ip_address, neighbor_remote_as in neighbors
        ]
    return path, json.dumps(data)
+
def network_instance_add_protocol_direct(ni_name, ni_type) -> Tuple[str, Dict]:
    """Compose the (resource_key, value) pair enabling the DIRECTLY_CONNECTED
    protocol on a network instance. The value is a JSON string."""
    protocol_id = 'DIRECTLY_CONNECTED'
    path = '/network_instance[{:s}]/protocols[{:s}]'.format(ni_name, protocol_id)
    data = dict(name=ni_name, type=ni_type, identifier=protocol_id, protocol_name=protocol_id)
    return path, json.dumps(data)
+
def network_instance_add_protocol_static(ni_name, ni_type) -> Tuple[str, Dict]:
    """Compose the (resource_key, value) pair enabling the STATIC protocol on
    a network instance. The value is a JSON string."""
    protocol_id = 'STATIC'
    path = '/network_instance[{:s}]/protocols[{:s}]'.format(ni_name, protocol_id)
    data = dict(name=ni_name, type=ni_type, identifier=protocol_id, protocol_name=protocol_id)
    return path, json.dumps(data)
+
+#def network_instance_static_route(ni_name, prefix, next_hop, next_hop_index=0) -> Tuple[str, Dict]:
+#    path = '/network_instance[{:s}]/static_route[{:s}]'.format(ni_name, prefix)
+#    data = {'name': ni_name, 'prefix': prefix, 'next_hop': next_hop, 'next_hop_index': next_hop_index}
+#    return path, json.dumps(data)
+
def network_instance_add_table_connection(
    ni_name, src_protocol, dst_protocol, address_family, default_import_policy, bgp_as=None
) -> Tuple[str, Dict]:
    """Compose the (resource_key, value) pair for a table connection (route
    redistribution from src_protocol into dst_protocol). The value is a JSON
    string; 'as' is included only when bgp_as is given."""
    path = '/network_instance[{:s}]/table_connections[{:s}][{:s}][{:s}]'.format(
        ni_name, src_protocol, dst_protocol, address_family
    )
    data = dict(
        name=ni_name, src_protocol=src_protocol, dst_protocol=dst_protocol,
        address_family=address_family, default_import_policy=default_import_policy,
    )
    if bgp_as is not None:
        data['as'] = bgp_as
    return path, json.dumps(data)
+
def interface(
    name, index, description=None, if_type=None, vlan_id=None, mtu=None, ipv4_address_prefix=None, enabled=None
) -> Tuple[str, Dict]:
    """Compose the (resource_key, value) pair for a subinterface. The value is
    a JSON string; every optional argument left as None is omitted.

    ipv4_address_prefix, when given, is an (address, prefix_length) pair that
    expands into the 'address_ip'/'address_prefix' fields.
    """
    path = '/interface[{:s}]/subinterface[{:d}]'.format(name, index)
    data = {'name': name, 'index': index}
    # Optional scalar fields, in the same order the original emitted them.
    optional_fields = (
        ('description', description), ('type', if_type), ('vlan_id', vlan_id),
        ('mtu', mtu), ('enabled', enabled),
    )
    for field_name, field_value in optional_fields:
        if field_value is not None:
            data[field_name] = field_value
    if ipv4_address_prefix is not None:
        ipv4_address, ipv4_prefix = ipv4_address_prefix
        data['address_ip'    ] = ipv4_address
        data['address_prefix'] = ipv4_prefix
    return path, json.dumps(data)
+
def network_instance_interface(ni_name, ni_type, if_name, if_index) -> Tuple[str, Dict]:
    """Compose the (resource_key, value) pair binding subinterface
    '<if_name>.<if_index>' to a network instance. The value is a JSON string."""
    subif_id = '{:s}.{:d}'.format(if_name, if_index)
    path = '/network_instance[{:s}]/interface[{:s}]'.format(ni_name, subif_id)
    data = dict(name=ni_name, type=ni_type, id=if_name, interface=if_name, subinterface=if_index)
    return path, json.dumps(data)
+
def test_configure(drivers : Dict[str, GnmiOpenConfigDriver]):
    """End-to-end L3VRF provisioning check: configure the 'ecoc24' VRF
    (DIRECTLY_CONNECTED/STATIC/BGP protocols, table connections, two
    interfaces) on both gateways, then deconfigure everything again.

    NOTE(review): the original annotation referenced `OpenConfigDriver`, which
    is not imported in this module (annotation corrected to the imported
    GnmiOpenConfigDriver). Requires the `drivers` fixture to provide 'CSGW1'
    and 'CSGW2' entries — the DEVICES inventory above defines 'SW1'/'SW2';
    confirm the intended key names.
    """
    #resources_to_get = []
    #resources_to_get = [RESOURCE_ENDPOINTS]
    #resources_to_get = [RESOURCE_INTERFACES]
    #resources_to_get = [RESOURCE_NETWORK_INSTANCES]
    #resources_to_get = [RESOURCE_ROUTING_POLICIES]
    #resources_to_get = [RESOURCE_SERVICES]
    #LOGGER.info('resources_to_get = {:s}'.format(str(resources_to_get)))
    #results_getconfig = driver.GetConfig(resources_to_get)
    #LOGGER.info('results_getconfig = {:s}'.format(str(results_getconfig)))

    # CSGW1: VRF + protocols + redistribution + CE-facing and core-facing
    # interfaces (each interface is created bare first, then bound to the VRF,
    # then addressed/enabled).
    csgw1_resources_to_set = [
        network_instance('ecoc24', 'L3VRF', '192.168.150.1', '65001:1'),
        network_instance_add_protocol_direct('ecoc24', 'L3VRF'),
        network_instance_add_protocol_static('ecoc24', 'L3VRF'),
        network_instance_add_protocol_bgp('ecoc24', 'L3VRF', '192.168.150.1', '65001', neighbors=[
            ('192.168.150.2', '65001')
        ]),
        network_instance_add_table_connection('ecoc24', 'DIRECTLY_CONNECTED', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'),
        network_instance_add_table_connection('ecoc24', 'STATIC', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'),

        interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500),
        network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0),
        interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.10.1', 24), enabled=True),

        interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500),
        network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0),
        interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.150.1', 24), enabled=True),
    ]
    LOGGER.info('CSGW1 resources_to_set = {:s}'.format(str(csgw1_resources_to_set)))
    results_setconfig = drivers['CSGW1'].SetConfig(csgw1_resources_to_set)
    LOGGER.info('CSGW1 results_setconfig = {:s}'.format(str(results_setconfig)))

    # CSGW2: mirror configuration with swapped addresses/neighbor.
    csgw2_resources_to_set = [
        network_instance('ecoc24', 'L3VRF', '192.168.150.2', '65001:1'),
        network_instance_add_protocol_direct('ecoc24', 'L3VRF'),
        network_instance_add_protocol_static('ecoc24', 'L3VRF'),
        network_instance_add_protocol_bgp('ecoc24', 'L3VRF', '192.168.150.2', '65001', neighbors=[
            ('192.168.150.1', '65001')
        ]),
        network_instance_add_table_connection('ecoc24', 'DIRECTLY_CONNECTED', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'),
        network_instance_add_table_connection('ecoc24', 'STATIC', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'),

        interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500),
        network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0),
        interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.20.1', 24), enabled=True),

        interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500),
        network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0),
        interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.150.2', 24), enabled=True),
    ]
    LOGGER.info('CSGW2 resources_to_set = {:s}'.format(str(csgw2_resources_to_set)))
    results_setconfig = drivers['CSGW2'].SetConfig(csgw2_resources_to_set)
    LOGGER.info('CSGW2 results_setconfig = {:s}'.format(str(results_setconfig)))

    # Teardown: unbind interfaces, then remove the VRF (bare interface
    # deletion is intentionally left commented out).
    csgw1_resources_to_delete = [
        network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0),
        network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0),
        #interface('ce1', 0),
        #interface('xe5', 0),
        network_instance('ecoc24', 'L3VRF'),
    ]
    LOGGER.info('CSGW1 resources_to_delete = {:s}'.format(str(csgw1_resources_to_delete)))
    results_deleteconfig = drivers['CSGW1'].DeleteConfig(csgw1_resources_to_delete)
    LOGGER.info('CSGW1 results_deleteconfig = {:s}'.format(str(results_deleteconfig)))

    csgw2_resources_to_delete = [
        network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0),
        network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0),
        #interface('ce1', 0),
        #interface('xe5', 0),
        network_instance('ecoc24', 'L3VRF'),
    ]
    LOGGER.info('CSGW2 resources_to_delete = {:s}'.format(str(csgw2_resources_to_delete)))
    results_deleteconfig = drivers['CSGW2'].DeleteConfig(csgw2_resources_to_delete)
    LOGGER.info('CSGW2 results_deleteconfig = {:s}'.format(str(results_deleteconfig)))
+
+
+
+
+
+
# NOTE(review): everything from here to the end of this file repeats the
# module prelude above (second license header, repeated imports, a second
# `driver` fixture). The file appears to be two test modules accidentally
# concatenated; consider moving the content below into its own module.
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
+import os
+os.environ['DEVICE_EMULATED_ONLY'] = 'YES'
+
+# pylint: disable=wrong-import-position
+import logging, pytest, time
+from typing import Dict, List
+from device.service.driver_api._Driver import (
+    RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES,
+    RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES
+)
+from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver
+from .storage.Storage import Storage
+from .tools.manage_config import (
+    check_config_endpoints, check_config_interfaces, check_config_network_instances, del_config, get_config, set_config
+)
+from .tools.check_updates import check_updates
+from .tools.request_composers import (
+    interface, network_instance, network_instance_interface, network_instance_static_route
+)
+
+logging.basicConfig(level=logging.DEBUG)
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+##### DRIVER FIXTURE ###################################################################################################
+
# Connection parameters for the device under test (gNMI endpoint).
# NOTE(review): address/credentials look like lab defaults for a
# containerized test device — confirm before running elsewhere.
DRIVER_SETTING_ADDRESS  = '172.20.20.101'
DRIVER_SETTING_PORT     = 6030
DRIVER_SETTING_USERNAME = 'admin'
DRIVER_SETTING_PASSWORD = 'admin'
DRIVER_SETTING_USE_TLS  = False
+
@pytest.fixture(scope='session')
def driver() -> GnmiOpenConfigDriver:
    """Session-scoped gNMI driver connected to the device under test;
    disconnected at teardown after a short settle delay."""
    _driver = GnmiOpenConfigDriver(
        DRIVER_SETTING_ADDRESS, DRIVER_SETTING_PORT,
        username=DRIVER_SETTING_USERNAME,
        password=DRIVER_SETTING_PASSWORD,
        use_tls=DRIVER_SETTING_USE_TLS,
    )
    _driver.Connect()
    yield _driver
    time.sleep(1)   # allow in-flight operations to settle before disconnecting
    _driver.Disconnect()
+
+
+##### STORAGE FIXTURE ##################################################################################################
+
@pytest.fixture(scope='session')
def storage() -> Storage:
    # Shared expected-state mirror used by the ordered tests in this module.
    # (Annotation corrected: the fixture yields a Storage, not a Dict.)
    yield Storage()
+
+
+##### NETWORK INSTANCE DETAILS #########################################################################################
+
# Declarative description of the network instances exercised by the ordered
# tests below: each entry lists the interfaces to bind/address and the static
# routes to install and later remove. The L2VSI variant is kept commented
# out — presumably unsupported or untested on this device; confirm.
NETWORK_INSTANCES = [
    {
        'name': 'test-l3-svc',
        'type': 'L3VRF',
        'interfaces': [
            {'name': 'Ethernet1',  'index': 0, 'ipv4_addr': '192.168.1.1',  'ipv4_prefix': 24, 'enabled': True},
            {'name': 'Ethernet10', 'index': 0, 'ipv4_addr': '192.168.10.1', 'ipv4_prefix': 24, 'enabled': True},
        ],
        'static_routes': [
            {'prefix': '172.0.0.0/24', 'next_hop': '172.16.0.2', 'metric': 1},
            {'prefix': '172.2.0.0/24', 'next_hop': '172.16.0.3', 'metric': 1},
        ]
    },
    #{
    #    'name': 'test-l2-svc',
    #    'type': 'L2VSI',
    #    'interfaces': [
    #        {'name': 'Ethernet2', 'index': 0, 'ipv4_addr': '192.168.1.1',  'ipv4_prefix': 24, 'enabled': True},
    #        {'name': 'Ethernet4', 'index': 0, 'ipv4_addr': '192.168.10.1', 'ipv4_prefix': 24, 'enabled': True},
    #    ],
    #    'static_routes': [
    #        {'prefix': '172.0.0.0/24', 'next_hop': '172.16.0.2', 'metric': 1},
    #        {'prefix': '172.2.0.0/24', 'next_hop': '172.16.0.3', 'metric': 1},
    #    ]
    #}
]
+
+
+##### TEST METHODS #####################################################################################################
+
def test_get_endpoints(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Fetch endpoints from the device, seed the expected-state storage with
    them, then verify device config matches the stored expectation."""
    results_getconfig = get_config(driver, [RESOURCE_ENDPOINTS])
    storage.endpoints.populate(results_getconfig)
    check_config_endpoints(driver, storage)
+
+
def test_get_interfaces(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Fetch interfaces from the device, seed the expected-state storage, then
    verify device config matches the stored expectation."""
    results_getconfig = get_config(driver, [RESOURCE_INTERFACES])
    storage.interfaces.populate(results_getconfig)
    check_config_interfaces(driver, storage)
+
+
def test_get_network_instances(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Fetch network instances from the device, seed the expected-state
    storage, then verify device config matches the stored expectation."""
    results_getconfig = get_config(driver, [RESOURCE_NETWORK_INSTANCES])
    storage.network_instances.populate(results_getconfig)
    check_config_network_instances(driver, storage)
+
+
def test_set_network_instances(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Create the network instances from NETWORK_INSTANCES on the device and
    mirror them into the expected-state storage, then re-check config."""
    check_config_interfaces(driver, storage)
    check_config_network_instances(driver, storage)

    resources_to_set = list()
    ni_names = list()
    for ni in NETWORK_INSTANCES:
        ni_name = ni['name']
        ni_type = ni['type']
        resources_to_set.append(network_instance(ni_name, ni_type))
        ni_names.append(ni_name)
        storage.network_instances.network_instances.add(ni_name, {'type': ni_type})
        # Mirror state the device is expected to create implicitly alongside a
        # new instance (DIRECTLY_CONNECTED protocol + its v4/v6 tables) —
        # presumably device behavior; confirm against the driver's GetConfig.
        storage.network_instances.protocols.add(ni_name, 'DIRECTLY_CONNECTED')
        storage.network_instances.tables.add(ni_name, 'DIRECTLY_CONNECTED', 'IPV4')
        storage.network_instances.tables.add(ni_name, 'DIRECTLY_CONNECTED', 'IPV6')

    results_setconfig = set_config(driver, resources_to_set)
    check_updates(results_setconfig, '/network_instance[{:s}]', ni_names)

    # Retries: allow the device time to converge before asserting.
    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)
    check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
def test_add_interfaces_to_network_instance(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Bind each declared subinterface to its network instance on the device
    and mirror the binding into the expected-state storage."""
    check_config_interfaces(driver, storage)
    check_config_network_instances(driver, storage)

    resources_to_set = list()
    ni_if_names = list()
    for ni in NETWORK_INSTANCES:
        ni_name = ni['name']
        for ni_if in ni.get('interfaces', list()):
            if_name     = ni_if['name' ]
            subif_index = ni_if['index']
            resources_to_set.append(network_instance_interface(ni_name, if_name, subif_index))
            # Resource keys use the '<name>.<index>' subinterface id form.
            ni_if_names.append((ni_name, '{:s}.{:d}'.format(if_name, subif_index)))
            storage.network_instances.interfaces.add(ni_name, if_name, subif_index)

    results_setconfig = set_config(driver, resources_to_set)
    check_updates(results_setconfig, '/network_instance[{:s}]/interface[{:s}]', ni_if_names)

    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)
    check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
def test_set_interfaces(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Address and enable each declared subinterface; mirror the new IPv4
    address into storage and remove the port from the default VLAN."""
    check_config_interfaces(driver, storage)
    check_config_network_instances(driver, storage)

    resources_to_set = list()
    if_names = list()
    for ni in NETWORK_INSTANCES:
        for ni_if in ni.get('interfaces', list()):
            if_name      = ni_if['name'       ]
            subif_index  = ni_if['index'      ]
            ipv4_address = ni_if['ipv4_addr'  ]
            ipv4_prefix  = ni_if['ipv4_prefix']
            enabled      = ni_if['enabled'    ]
            resources_to_set.append(interface(
                if_name, subif_index, ipv4_address, ipv4_prefix, enabled
            ))
            if_names.append(if_name)
            storage.interfaces.ipv4_addresses.add(if_name, subif_index, ipv4_address, {
                'origin' : 'STATIC', 'prefix': ipv4_prefix
            })
            # Mirror the device moving a newly-addressed port out of VLAN
            # 'default' (id 1) — presumably device behavior; confirm.
            default_vlan = storage.network_instances.vlans.get('default', 1)
            default_vlan_members : List[str] = default_vlan.setdefault('members', list())
            if if_name in default_vlan_members: default_vlan_members.remove(if_name)

    results_setconfig = set_config(driver, resources_to_set)
    check_updates(results_setconfig, '/interface[{:s}]', if_names)

    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)
    check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
def test_set_network_instance_static_routes(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Install the declared static routes and mirror the STATIC protocol, its
    routes and its v4/v6 tables into the expected-state storage."""
    check_config_interfaces(driver, storage)
    check_config_network_instances(driver, storage)

    resources_to_set = list()
    ni_sr_prefixes = list()
    for ni in NETWORK_INSTANCES:
        ni_name = ni['name']
        for ni_sr in ni.get('static_routes', list()):
            ni_sr_prefix   = ni_sr['prefix'  ]
            ni_sr_next_hop = ni_sr['next_hop']
            ni_sr_metric   = ni_sr['metric'  ]
            # Synthetic next-hop index, e.g. 'AUTO_1_172-16-0-2' (metric +
            # dash-separated next-hop address).
            ni_sr_next_hop_index = 'AUTO_{:d}_{:s}'.format(ni_sr_metric, '-'.join(ni_sr_next_hop.split('.')))
            resources_to_set.append(network_instance_static_route(
                ni_name, ni_sr_prefix, ni_sr_next_hop_index, ni_sr_next_hop, metric=ni_sr_metric
            ))
            ni_sr_prefixes.append((ni_name, ni_sr_prefix))
            # add() is idempotent, so repeating it per route is harmless.
            storage.network_instances.protocols.add(ni_name, 'STATIC')
            storage.network_instances.protocol_static.add(ni_name, 'STATIC', ni_sr_prefix, {
                'prefix': ni_sr_prefix, 'next_hops': {
                    ni_sr_next_hop_index: {'next_hop': ni_sr_next_hop, 'metric': ni_sr_metric}
                }
            })
            storage.network_instances.tables.add(ni_name, 'STATIC', 'IPV4')
            storage.network_instances.tables.add(ni_name, 'STATIC', 'IPV6')

    results_setconfig = set_config(driver, resources_to_set)
    check_updates(results_setconfig, '/network_instance[{:s}]/static_route[{:s}]', ni_sr_prefixes)

    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)
    check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
def test_del_network_instance_static_routes(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Remove the declared static routes and mirror the removal (routes,
    STATIC protocol and its tables) in the expected-state storage."""
    check_config_interfaces(driver, storage)
    check_config_network_instances(driver, storage)

    resources_to_delete = list()
    ni_sr_prefixes = list()
    for ni in NETWORK_INSTANCES:
        ni_name = ni['name']
        for ni_sr in ni.get('static_routes', list()):
            ni_sr_prefix   = ni_sr['prefix'  ]
            ni_sr_next_hop = ni_sr['next_hop']
            ni_sr_metric   = ni_sr['metric'  ]
            # Must match the index format used when the routes were set.
            ni_sr_next_hop_index = 'AUTO_{:d}_{:s}'.format(ni_sr_metric, '-'.join(ni_sr_next_hop.split('.')))
            resources_to_delete.append(network_instance_static_route(
                ni_name, ni_sr_prefix, ni_sr_next_hop_index, ni_sr_next_hop, metric=ni_sr_metric
            ))
            ni_sr_prefixes.append((ni_name, ni_sr_prefix))

            # Protocol/tables removal repeats once per route; remove() is a
            # no-op when the entry is already gone, so this is harmless.
            storage.network_instances.protocols.remove(ni_name, 'STATIC')
            storage.network_instances.protocol_static.remove(ni_name, 'STATIC', ni_sr_prefix)
            storage.network_instances.tables.remove(ni_name, 'STATIC', 'IPV4')
            storage.network_instances.tables.remove(ni_name, 'STATIC', 'IPV6')

    results_deleteconfig = del_config(driver, resources_to_delete)
    check_updates(results_deleteconfig, '/network_instance[{:s}]/static_route[{:s}]', ni_sr_prefixes)

    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)
    #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
def test_del_interfaces(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Deconfigure the declared subinterfaces; mirror the address removal and
    the port returning to the default VLAN in the expected-state storage."""
    check_config_interfaces(driver, storage)
    #check_config_network_instances(driver, storage)

    resources_to_delete = list()
    if_names = list()
    for ni in NETWORK_INSTANCES:
        for ni_if in ni.get('interfaces', list()):
            if_name      = ni_if['name'       ]
            subif_index  = ni_if['index'      ]
            ipv4_address = ni_if['ipv4_addr'  ]
            ipv4_prefix  = ni_if['ipv4_prefix']
            enabled      = ni_if['enabled'    ]
            resources_to_delete.append(interface(if_name, subif_index, ipv4_address, ipv4_prefix, enabled))
            if_names.append(if_name)
            storage.interfaces.ipv4_addresses.remove(if_name, subif_index, ipv4_address)
            # Inverse of test_set_interfaces: the port is expected back in the
            # 'default' VLAN (id 1) member list once unaddressed.
            default_vlan = storage.network_instances.vlans.get('default', 1)
            default_vlan_members : List[str] = default_vlan.setdefault('members', list())
            if if_name not in default_vlan_members: default_vlan_members.append(if_name)

    results_deleteconfig = del_config(driver, resources_to_delete)
    check_updates(results_deleteconfig, '/interface[{:s}]', if_names)

    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)
    #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
def test_del_interfaces_from_network_instance(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Unbind each declared subinterface from its network instance and mirror
    the unbinding in the expected-state storage."""
    check_config_interfaces(driver, storage)
    #check_config_network_instances(driver, storage)

    resources_to_delete = list()
    ni_if_names = list()
    for ni in NETWORK_INSTANCES:
        ni_name = ni['name']
        for ni_if in ni.get('interfaces', list()):
            if_name     = ni_if['name' ]
            subif_index = ni_if['index']
            resources_to_delete.append(network_instance_interface(ni_name, if_name, subif_index))
            ni_if_names.append((ni_name, '{:s}.{:d}'.format(if_name, subif_index)))
            storage.network_instances.interfaces.remove(ni_name, if_name, subif_index)

    results_deleteconfig = del_config(driver, resources_to_delete)
    check_updates(results_deleteconfig, '/network_instance[{:s}]/interface[{:s}]', ni_if_names)

    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)
    #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
def test_del_network_instances(
    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
    storage : Storage,              # pylint: disable=redefined-outer-name
) -> None:
    """Delete the network instances created earlier and mirror the removal
    (instance, implicit DIRECTLY_CONNECTED protocol and tables) in storage."""
    check_config_interfaces(driver, storage)
    #check_config_network_instances(driver, storage)

    resources_to_delete = list()
    ni_names = list()
    for ni in NETWORK_INSTANCES:
        ni_name = ni['name']
        ni_type = ni['type']
        resources_to_delete.append(network_instance(ni_name, ni_type))
        ni_names.append(ni_name)
        storage.network_instances.network_instances.remove(ni_name)
        storage.network_instances.protocols.remove(ni_name, 'DIRECTLY_CONNECTED')
        storage.network_instances.tables.remove(ni_name, 'DIRECTLY_CONNECTED', 'IPV4')
        storage.network_instances.tables.remove(ni_name, 'DIRECTLY_CONNECTED', 'IPV6')

    results_deleteconfig = del_config(driver, resources_to_delete)
    check_updates(results_deleteconfig, '/network_instance[{:s}]', ni_names)

    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)
    check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
diff --git a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig.py b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ec2e2f5f808ad5caa12c7dd1effb927bc4dd068
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig.py
@@ -0,0 +1,360 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+os.environ['DEVICE_EMULATED_ONLY'] = 'YES'
+
+# pylint: disable=wrong-import-position
+import logging, pytest, time
+from typing import Dict, List
+from device.service.driver_api._Driver import (
+    RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES,
+    RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES
+)
+from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver
+from .storage.Storage import Storage
+from .tools.manage_config import (
+    check_config_endpoints, check_config_interfaces, check_config_network_instances, del_config, get_config, set_config
+)
+from .tools.check_updates import check_updates
+from .tools.request_composers import (
+    interface, network_instance, network_instance_interface, network_instance_static_route
+)
+
+logging.basicConfig(level=logging.DEBUG)
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+
+##### DRIVER FIXTURE ###################################################################################################
+
+DRIVER_SETTING_ADDRESS  = '172.20.20.101'  # management address of the lab device — confirm against the containerlab topology
+DRIVER_SETTING_PORT     = 6030             # gNMI port (6030 is the Arista default) — confirm for the target node
+DRIVER_SETTING_USERNAME = 'admin'          # lab-only credentials, not secrets
+DRIVER_SETTING_PASSWORD = 'admin'
+DRIVER_SETTING_USE_TLS  = False            # plain-text gNMI inside the lab
+
+@pytest.fixture(scope='session')  # single driver instance shared by every test in the session
+def driver() -> GnmiOpenConfigDriver:
+    _driver = GnmiOpenConfigDriver(
+        DRIVER_SETTING_ADDRESS, DRIVER_SETTING_PORT,
+        username=DRIVER_SETTING_USERNAME,
+        password=DRIVER_SETTING_PASSWORD,
+        use_tls=DRIVER_SETTING_USE_TLS,
+    )
+    _driver.Connect()   # establish the gNMI session before handing the driver to tests
+    yield _driver
+    time.sleep(1)       # let in-flight gNMI operations settle before teardown — TODO confirm still needed
+    _driver.Disconnect()
+
+
+##### STORAGE FIXTURE ##################################################################################################
+
+@pytest.fixture(scope='session')  # single shared expected-state store for the whole test sequence
+def storage() -> Storage:  # fixed: annotation said 'Dict' but a Storage instance is yielded
+    yield Storage()
+
+
+##### NETWORK INSTANCE DETAILS #########################################################################################
+
+NETWORK_INSTANCES = [  # scenarios under test: each entry drives the create/attach/route/delete test steps below
+    {
+        'name': 'test-l3-svc',
+        'type': 'L3VRF',
+        'interfaces': [
+            {'name': 'Ethernet1',  'index': 0, 'ipv4_addr': '192.168.1.1',  'ipv4_prefix': 24, 'enabled': True},
+            {'name': 'Ethernet10', 'index': 0, 'ipv4_addr': '192.168.10.1', 'ipv4_prefix': 24, 'enabled': True},
+        ],
+        'static_routes': [
+            {'prefix': '172.0.0.0/24', 'next_hop': '172.16.0.2', 'metric': 1},
+            {'prefix': '172.2.0.0/24', 'next_hop': '172.16.0.3', 'metric': 1},
+        ]
+    },
+    #{  # L2VSI scenario kept disabled — re-enable once L2 support is validated
+    #    'name': 'test-l2-svc',
+    #    'type': 'L2VSI',
+    #    'interfaces': [
+    #        {'name': 'Ethernet2', 'index': 0, 'ipv4_addr': '192.168.1.1',  'ipv4_prefix': 24, 'enabled': True},
+    #        {'name': 'Ethernet4', 'index': 0, 'ipv4_addr': '192.168.10.1', 'ipv4_prefix': 24, 'enabled': True},
+    #    ],
+    #    'static_routes': [
+    #        {'prefix': '172.0.0.0/24', 'next_hop': '172.16.0.2', 'metric': 1},
+    #        {'prefix': '172.2.0.0/24', 'next_hop': '172.16.0.3', 'metric': 1},
+    #    ]
+    #}
+]
+
+
+##### TEST METHODS #####################################################################################################
+
+def test_get_endpoints(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # discover device endpoints and verify a second read matches
+    results_getconfig = get_config(driver, [RESOURCE_ENDPOINTS])
+    storage.endpoints.populate(results_getconfig)  # seed expected state from the device's own answer
+    check_config_endpoints(driver, storage)        # re-read and verify consistency
+
+
+def test_get_interfaces(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # discover device interfaces and verify a second read matches
+    results_getconfig = get_config(driver, [RESOURCE_INTERFACES])
+    storage.interfaces.populate(results_getconfig)  # seed expected state from the device's own answer
+    check_config_interfaces(driver, storage)        # re-read and verify consistency
+
+
+def test_get_network_instances(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # discover pre-existing network instances and verify a second read matches
+    results_getconfig = get_config(driver, [RESOURCE_NETWORK_INSTANCES])
+    storage.network_instances.populate(results_getconfig)  # seed expected state from the device's own answer
+    check_config_network_instances(driver, storage)        # re-read and verify consistency
+
+
+def test_set_network_instances(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # create the network instances defined in NETWORK_INSTANCES and verify device state
+    check_config_interfaces(driver, storage)
+    check_config_network_instances(driver, storage)
+
+    resources_to_set = list()
+    ni_names = list()
+    for ni in NETWORK_INSTANCES:
+        ni_name = ni['name']
+        ni_type = ni['type']
+        resources_to_set.append(network_instance(ni_name, ni_type))
+        ni_names.append(ni_name)
+        storage.network_instances.network_instances.add(ni_name, {'type': ni_type})
+        storage.network_instances.protocols.add(ni_name, 'DIRECTLY_CONNECTED')  # presumably auto-created by the device with the NI — confirm
+        storage.network_instances.tables.add(ni_name, 'DIRECTLY_CONNECTED', 'IPV4')
+        storage.network_instances.tables.add(ni_name, 'DIRECTLY_CONNECTED', 'IPV6')
+
+    results_setconfig = set_config(driver, resources_to_set)
+    check_updates(results_setconfig, '/network_instance[{:s}]', ni_names)
+
+    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)  # retries: device applies config asynchronously
+    check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
+def test_add_interfaces_to_network_instance(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # attach each configured subinterface to its network instance and verify
+    check_config_interfaces(driver, storage)
+    check_config_network_instances(driver, storage)
+
+    resources_to_set = list()
+    ni_if_names = list()
+    for ni in NETWORK_INSTANCES:
+        ni_name = ni['name']
+        for ni_if in ni.get('interfaces', list()):
+            if_name     = ni_if['name' ]
+            subif_index = ni_if['index']
+            resources_to_set.append(network_instance_interface(ni_name, if_name, subif_index))
+            ni_if_names.append((ni_name, '{:s}.{:d}'.format(if_name, subif_index)))  # '<if>.<subif>' id format used in update keys
+            storage.network_instances.interfaces.add(ni_name, if_name, subif_index)
+
+    results_setconfig = set_config(driver, resources_to_set)
+    check_updates(results_setconfig, '/network_instance[{:s}]/interface[{:s}]', ni_if_names)
+
+    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)  # retries: device applies config asynchronously
+    check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
+def test_set_interfaces(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # configure IPv4 addresses on the subinterfaces and verify device state
+    check_config_interfaces(driver, storage)
+    check_config_network_instances(driver, storage)
+
+    resources_to_set = list()
+    if_names = list()
+    for ni in NETWORK_INSTANCES:
+        for ni_if in ni.get('interfaces', list()):
+            if_name      = ni_if['name'       ]
+            subif_index  = ni_if['index'      ]
+            ipv4_address = ni_if['ipv4_addr'  ]
+            ipv4_prefix  = ni_if['ipv4_prefix']
+            enabled      = ni_if['enabled'    ]
+            resources_to_set.append(interface(
+                if_name, subif_index, ipv4_address, ipv4_prefix, enabled
+            ))
+            if_names.append(if_name)
+            storage.interfaces.ipv4_addresses.add(if_name, subif_index, ipv4_address, {
+                'origin' : 'STATIC', 'prefix': ipv4_prefix
+            })
+            default_vlan = storage.network_instances.vlans.get('default', 1)  # VLAN 1 of the 'default' network instance
+            default_vlan_members : List[str] = default_vlan.setdefault('members', list())
+            if if_name in default_vlan_members: default_vlan_members.remove(if_name)  # presumably L3 config removes the port from VLAN 1 — confirm
+
+    results_setconfig = set_config(driver, resources_to_set)
+    check_updates(results_setconfig, '/interface[{:s}]', if_names)
+
+    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)  # retries: device applies config asynchronously
+    check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
+def test_set_network_instance_static_routes(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # install the static routes of each network instance and verify device state
+    check_config_interfaces(driver, storage)
+    check_config_network_instances(driver, storage)
+
+    resources_to_set = list()
+    ni_sr_prefixes = list()
+    for ni in NETWORK_INSTANCES:
+        ni_name = ni['name']
+        for ni_sr in ni.get('static_routes', list()):
+            ni_sr_prefix   = ni_sr['prefix'  ]
+            ni_sr_next_hop = ni_sr['next_hop']
+            ni_sr_metric   = ni_sr['metric'  ]
+            ni_sr_next_hop_index = 'AUTO_{:d}_{:s}'.format(ni_sr_metric, '-'.join(ni_sr_next_hop.split('.')))  # e.g. 'AUTO_1_172-16-0-2'; presumably mirrors the index the device reports — confirm
+            resources_to_set.append(network_instance_static_route(
+                ni_name, ni_sr_prefix, ni_sr_next_hop_index, ni_sr_next_hop, metric=ni_sr_metric
+            ))
+            ni_sr_prefixes.append((ni_name, ni_sr_prefix))
+            storage.network_instances.protocols.add(ni_name, 'STATIC')
+            storage.network_instances.protocol_static.add(ni_name, 'STATIC', ni_sr_prefix, {
+                'prefix': ni_sr_prefix, 'next_hops': {
+                    ni_sr_next_hop_index: {'next_hop': ni_sr_next_hop, 'metric': ni_sr_metric}
+                }
+            })
+            storage.network_instances.tables.add(ni_name, 'STATIC', 'IPV4')
+            storage.network_instances.tables.add(ni_name, 'STATIC', 'IPV6')
+
+    results_setconfig = set_config(driver, resources_to_set)
+    check_updates(results_setconfig, '/network_instance[{:s}]/static_route[{:s}]', ni_sr_prefixes)
+
+    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)  # retries: device applies config asynchronously
+    check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
+def test_del_network_instance_static_routes(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # remove the static routes installed by the previous test and verify
+    check_config_interfaces(driver, storage)
+    check_config_network_instances(driver, storage)
+
+    resources_to_delete = list()
+    ni_sr_prefixes = list()
+    for ni in NETWORK_INSTANCES:
+        ni_name = ni['name']
+        for ni_sr in ni.get('static_routes', list()):
+            ni_sr_prefix   = ni_sr['prefix'  ]
+            ni_sr_next_hop = ni_sr['next_hop']
+            ni_sr_metric   = ni_sr['metric'  ]
+            ni_sr_next_hop_index = 'AUTO_{:d}_{:s}'.format(ni_sr_metric, '-'.join(ni_sr_next_hop.split('.')))  # must match the index used when the route was created
+            resources_to_delete.append(network_instance_static_route(
+                ni_name, ni_sr_prefix, ni_sr_next_hop_index, ni_sr_next_hop, metric=ni_sr_metric
+            ))
+            ni_sr_prefixes.append((ni_name, ni_sr_prefix))
+
+            storage.network_instances.protocols.remove(ni_name, 'STATIC')  # NOTE(review): called once per route — assumes remove() tolerates repeats; confirm
+            storage.network_instances.protocol_static.remove(ni_name, 'STATIC', ni_sr_prefix)
+            storage.network_instances.tables.remove(ni_name, 'STATIC', 'IPV4')
+            storage.network_instances.tables.remove(ni_name, 'STATIC', 'IPV6')
+
+    results_deleteconfig = del_config(driver, resources_to_delete)
+    check_updates(results_deleteconfig, '/network_instance[{:s}]/static_route[{:s}]', ni_sr_prefixes)
+
+    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)  # retries: device applies config asynchronously
+    #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
+def test_del_interfaces(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # remove the IPv4 configuration from the subinterfaces and verify
+    check_config_interfaces(driver, storage)
+    #check_config_network_instances(driver, storage)
+
+    resources_to_delete = list()
+    if_names = list()
+    for ni in NETWORK_INSTANCES:
+        for ni_if in ni.get('interfaces', list()):
+            if_name      = ni_if['name'       ]
+            subif_index  = ni_if['index'      ]
+            ipv4_address = ni_if['ipv4_addr'  ]
+            ipv4_prefix  = ni_if['ipv4_prefix']
+            enabled      = ni_if['enabled'    ]
+            resources_to_delete.append(interface(if_name, subif_index, ipv4_address, ipv4_prefix, enabled))
+            if_names.append(if_name)
+            storage.interfaces.ipv4_addresses.remove(if_name, subif_index, ipv4_address)
+            default_vlan = storage.network_instances.vlans.get('default', 1)  # VLAN 1 of the 'default' network instance
+            default_vlan_members : List[str] = default_vlan.setdefault('members', list())
+            if if_name not in default_vlan_members: default_vlan_members.append(if_name)  # presumably deleting L3 config returns the port to VLAN 1 — confirm
+
+    results_deleteconfig = del_config(driver, resources_to_delete)
+    check_updates(results_deleteconfig, '/interface[{:s}]', if_names)
+
+    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)  # retries: device applies config asynchronously
+    #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
+def test_del_interfaces_from_network_instance(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # detach every configured subinterface from its network instance and verify
+    check_config_interfaces(driver, storage)
+    #check_config_network_instances(driver, storage)
+
+    resources_to_delete = list()
+    ni_if_names = list()
+    for ni in NETWORK_INSTANCES:
+        ni_name = ni['name']
+        for ni_if in ni.get('interfaces', list()):
+            if_name     = ni_if['name' ]
+            subif_index = ni_if['index']
+            resources_to_delete.append(network_instance_interface(ni_name, if_name, subif_index))
+            ni_if_names.append((ni_name, '{:s}.{:d}'.format(if_name, subif_index)))  # '<if>.<subif>' id format used in update keys
+            storage.network_instances.interfaces.remove(ni_name, if_name, subif_index)
+
+    results_deleteconfig = del_config(driver, resources_to_delete)
+    check_updates(results_deleteconfig, '/network_instance[{:s}]/interface[{:s}]', ni_if_names)
+
+    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)  # retries: device applies config asynchronously
+    #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
+
+
+def test_del_network_instances(
+    driver : GnmiOpenConfigDriver,  # pylint: disable=redefined-outer-name
+    storage : Storage,              # pylint: disable=redefined-outer-name
+) -> None:  # delete every network instance created by the test sequence and verify the device is clean
+    check_config_interfaces(driver, storage)
+    #check_config_network_instances(driver, storage)
+
+    resources_to_delete = list()
+    ni_names = list()
+    for ni in NETWORK_INSTANCES:
+        ni_name = ni['name']
+        ni_type = ni['type']
+        resources_to_delete.append(network_instance(ni_name, ni_type))
+        ni_names.append(ni_name)
+        storage.network_instances.network_instances.remove(ni_name)
+        storage.network_instances.protocols.remove(ni_name, 'DIRECTLY_CONNECTED')  # presumably removed along with the NI — confirm
+        storage.network_instances.tables.remove(ni_name, 'DIRECTLY_CONNECTED', 'IPV4')
+        storage.network_instances.tables.remove(ni_name, 'DIRECTLY_CONNECTED', 'IPV6')
+
+    results_deleteconfig = del_config(driver, resources_to_delete)
+    check_updates(results_deleteconfig, '/network_instance[{:s}]', ni_names)
+
+    check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0)  # retries: device applies config asynchronously
+    check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0)
diff --git a/src/device/tests/gnmi_openconfig/tools/__init__.py b/src/device/tests/gnmi_openconfig/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/tools/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/device/tests/gnmi_openconfig/tools/check_updates.py b/src/device/tests/gnmi_openconfig/tools/check_updates.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e120ab89ac508623ef27b6e6a6f792b6c565840
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/tools/check_updates.py
@@ -0,0 +1,22 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Iterable, List, Tuple
+
+def check_updates(results : Iterable[Tuple[str, bool]], format_str : str, item_ids : List[Tuple]) -> None:
+    results = set(results)
+    assert len(results) == len(item_ids)
+    for item_id_fields in item_ids:
+        if isinstance(item_id_fields, (str, int, float, bool)): item_id_fields = (item_id_fields,)
+        assert (format_str.format(*item_id_fields), True) in results
diff --git a/src/device/tests/gnmi_openconfig/tools/manage_config.py b/src/device/tests/gnmi_openconfig/tools/manage_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dc485bf256236bf9438536e9182181f784faacd
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/tools/manage_config.py
@@ -0,0 +1,103 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, deepdiff, logging, time
+from typing import Callable, Dict, List, Tuple, Union
+from device.service.driver_api._Driver import (
+    RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES,
+    RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES
+)
+from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver
+from device.tests.gnmi_openconfig.storage.Storage import Storage
+from .result_config_adapters import adapt_endpoint, adapt_interface, adapt_network_instance
+
+LOGGER = logging.getLogger(__name__)
+
+def get_config(driver : GnmiOpenConfigDriver, resources_to_get : List[str]) -> List[Tuple[str, Dict]]:  # thin logging wrapper over driver.GetConfig
+    LOGGER.info('[get_config] resources_to_get = {:s}'.format(str(resources_to_get)))
+    results_getconfig = driver.GetConfig(resources_to_get)  # returns (resource_key, resource_value) pairs
+    LOGGER.info('[get_config] results_getconfig = {:s}'.format(str(results_getconfig)))
+    return results_getconfig
+
+def set_config(
+    driver : GnmiOpenConfigDriver, resources_to_set : List[Tuple[str, Dict]]
+) -> List[Tuple[str, Union[bool, Exception]]]:  # thin logging wrapper over driver.SetConfig
+    LOGGER.info('[set_config] resources_to_set = {:s}'.format(str(resources_to_set)))
+    results_setconfig = driver.SetConfig(resources_to_set)  # per-resource status: True or an Exception
+    LOGGER.info('[set_config] results_setconfig = {:s}'.format(str(results_setconfig)))
+    return results_setconfig
+
+def del_config(
+    driver : GnmiOpenConfigDriver, resources_to_delete : List[Tuple[str, Dict]]
+) -> List[Tuple[str, Union[bool, Exception]]]:  # thin logging wrapper over driver.DeleteConfig
+    LOGGER.info('[del_config] resources_to_delete = {:s}'.format(str(resources_to_delete)))
+    results_deleteconfig = driver.DeleteConfig(resources_to_delete)
+    LOGGER.info('[del_config] results_deleteconfig = {:s}'.format(str(results_deleteconfig)))
+    return results_deleteconfig
+
+def check_expected_config(
+    driver : GnmiOpenConfigDriver, resources_to_get : List[str], expected_config : List[Dict],
+    func_adapt_returned_config : Callable[[str, Dict], Tuple[str, Dict]] = lambda k, v: (k, v),
+    max_retries : int = 1, retry_delay : float = 0.5
+) -> List[Dict]:  # poll GetConfig until it matches expected_config (or retries run out); returns the raw last read
+    LOGGER.info('expected_config = {:s}'.format(str(expected_config)))
+
+    num_diffs = 0       # initialized here so the post-loop check is safe even when max_retries < 1
+    diff_data = None
+    return_data = None
+    for _num_retry in range(max_retries):
+        results_getconfig = get_config(driver, resources_to_get)
+        return_data = copy.deepcopy(results_getconfig)
+
+        results_getconfig = [
+            func_adapt_returned_config(resource_key, resource_value)
+            for resource_key, resource_value in results_getconfig
+        ]
+
+        diff_data = deepdiff.DeepDiff(sorted(expected_config), sorted(results_getconfig))
+        num_diffs = len(diff_data)
+        if num_diffs == 0: break
+        # let the device take some time to reconfigure before retrying
+        time.sleep(retry_delay)
+
+    if num_diffs > 0: LOGGER.error('Differences[{:d}]:\n{:s}'.format(num_diffs, str(diff_data.pretty())))
+    assert num_diffs == 0
+    return return_data
+
+def check_config_endpoints(
+    driver : GnmiOpenConfigDriver, storage : Storage,
+    max_retries : int = 1, retry_delay : float = 0.5
+) -> List[Dict]:  # assert the device's endpoints match the expected state held in 'storage'
+    return check_expected_config(
+        driver, [RESOURCE_ENDPOINTS], storage.endpoints.get_expected_config(),
+        adapt_endpoint, max_retries=max_retries, retry_delay=retry_delay
+    )
+
+def check_config_interfaces(
+    driver : GnmiOpenConfigDriver, storage : Storage,
+    max_retries : int = 1, retry_delay : float = 0.5
+) -> List[Dict]:  # assert the device's interfaces match the expected state held in 'storage'
+    return check_expected_config(
+        driver, [RESOURCE_INTERFACES], storage.interfaces.get_expected_config(),
+        adapt_interface, max_retries=max_retries, retry_delay=retry_delay
+    )
+
+def check_config_network_instances(
+    driver : GnmiOpenConfigDriver, storage : Storage,
+    max_retries : int = 1, retry_delay : float = 0.5
+) -> List[Dict]:  # assert the device's network instances match the expected state held in 'storage'
+    return check_expected_config(
+        driver, [RESOURCE_NETWORK_INSTANCES], storage.network_instances.get_expected_config(),
+        adapt_network_instance, max_retries=max_retries, retry_delay=retry_delay
+    )
diff --git a/src/device/tests/gnmi_openconfig/tools/request_composers copy.py b/src/device/tests/gnmi_openconfig/tools/request_composers copy.py
new file mode 100644
index 0000000000000000000000000000000000000000..9545e156df2c88ea1c86f094b770e09328b368e6
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/tools/request_composers copy.py	
@@ -0,0 +1,46 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Tuple
+
+def interface(if_name, sif_index, ipv4_address, ipv4_prefix, enabled) -> Tuple[str, Dict]:  # NOTE(review): this whole file ('request_composers copy.py') looks like an accidental duplicate — consider deleting it
+    str_path = '/interface[{:s}]'.format(if_name)
+    str_data = {
+        'name': if_name, 'enabled': enabled, 'sub_if_index': sif_index, 'sub_if_enabled': enabled,
+        'sub_if_ipv4_enabled': enabled, 'sub_if_ipv4_address': ipv4_address, 'sub_if_ipv4_prefix': ipv4_prefix
+    }
+    return str_path, str_data
+
+def network_instance(ni_name, ni_type) -> Tuple[str, Dict]:  # identical to request_composers.network_instance
+    str_path = '/network_instance[{:s}]'.format(ni_name)
+    str_data = {
+        'name': ni_name, 'type': ni_type
+    }
+    return str_path, str_data
+
+def network_instance_static_route(ni_name, prefix, next_hop_index, next_hop, metric=1) -> Tuple[str, Dict]:  # NOTE(review): diverges from request_composers.py (extra 'identifier'/'protocol_name', 'index' vs 'next_hop_index') — reconcile before use
+    str_path = '/network_instance[{:s}]/static_route[{:s}]'.format(ni_name, prefix)
+    str_data = {
+        'name': ni_name, 'identifier': 'STATIC', 'protocol_name': 'STATIC',
+        'prefix': prefix, 'index': next_hop_index, 'next_hop': next_hop, 'metric': metric
+    }
+    return str_path, str_data
+
+def network_instance_interface(ni_name, if_name, sif_index) -> Tuple[str, Dict]:  # NOTE(review): diverges from request_composers.py ('id'/'interface'/'subinterface' vs 'if_name'/'sif_index') — reconcile before use
+    ni_if_id = '{:s}.{:d}'.format(if_name, sif_index)
+    str_path = '/network_instance[{:s}]/interface[{:s}]'.format(ni_name, ni_if_id)
+    str_data = {
+        'name': ni_name, 'id': ni_if_id, 'interface': if_name, 'subinterface': sif_index
+    }
+    return str_path, str_data
diff --git a/src/device/tests/gnmi_openconfig/tools/request_composers.py b/src/device/tests/gnmi_openconfig/tools/request_composers.py
new file mode 100644
index 0000000000000000000000000000000000000000..d80709114bdf36c1e634047256c7e42d638de3c5
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/tools/request_composers.py
@@ -0,0 +1,44 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Tuple
+
+def interface(if_name, sif_index, ipv4_address, ipv4_prefix, enabled) -> Tuple[str, Dict]:  # (resource_key, config) configuring a subinterface with an IPv4 address
+    str_path = '/interface[{:s}]'.format(if_name)
+    str_data = {
+        'name': if_name, 'enabled': enabled, 'sub_if_index': sif_index, 'sub_if_enabled': enabled,
+        'sub_if_ipv4_enabled': enabled, 'sub_if_ipv4_address': ipv4_address, 'sub_if_ipv4_prefix': ipv4_prefix
+    }
+    return str_path, str_data
+
+def network_instance(ni_name, ni_type) -> Tuple[str, Dict]:  # (resource_key, config) creating a network instance of the given type
+    str_path = '/network_instance[{:s}]'.format(ni_name)
+    str_data = {
+        'name': ni_name, 'type': ni_type
+    }
+    return str_path, str_data
+
+def network_instance_static_route(ni_name, prefix, next_hop_index, next_hop, metric=1) -> Tuple[str, Dict]:  # (resource_key, config) for a static route inside a network instance
+    str_path = '/network_instance[{:s}]/static_route[{:s}]'.format(ni_name, prefix)
+    str_data = {
+        'name': ni_name, 'prefix': prefix, 'next_hop_index': next_hop_index, 'next_hop': next_hop, 'metric': metric
+    }
+    return str_path, str_data
+
+def network_instance_interface(ni_name, if_name, sif_index) -> Tuple[str, Dict]:  # (resource_key, config) binding subinterface '<if>.<idx>' to the network instance
+    str_path = '/network_instance[{:s}]/interface[{:s}.{:d}]'.format(ni_name, if_name, sif_index)
+    str_data = {
+        'name': ni_name, 'if_name': if_name, 'sif_index': sif_index
+    }
+    return str_path, str_data
diff --git a/src/device/tests/gnmi_openconfig/tools/result_config_adapters.py b/src/device/tests/gnmi_openconfig/tools/result_config_adapters.py
new file mode 100644
index 0000000000000000000000000000000000000000..db7d5735d05eba9e4b8a2bec3b2763bdb3c3cc42
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/tools/result_config_adapters.py
@@ -0,0 +1,29 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from typing import Dict, Tuple
+
+def adapt_endpoint(resource_key : str, resource_value : Dict) -> Tuple[str, Dict]:  # identity adapter: endpoint results need no normalization
+    return resource_key, resource_value
+
+def adapt_interface(resource_key : str, resource_value : Dict) -> Tuple[str, Dict]:  # identity adapter: interface results need no normalization
+    return resource_key, resource_value
+
+def adapt_network_instance(resource_key : str, resource_value : Dict) -> Tuple[str, Dict]:  # normalize NI results before deepdiff comparison
+    match = re.match(r'^\/network\_instance\[([^\]]+)\]\/vlan\[([^\]]+)\]$', resource_key)
+    if match is not None:  # only VLAN entries carry a 'members' list that needs normalizing
+        members = resource_value.get('members', list())  # default avoids len(None) TypeError when 'members' is absent
+        if len(members) > 0: resource_value['members'] = sorted(members)  # sort for deterministic comparison
+    return resource_key, resource_value
diff --git a/src/device/tests/test_gnmi.py b/src/device/tests/test_gnmi.py
deleted file mode 100644
index ebd026a206ddf9444d30c01e8ac2d097307cc0db..0000000000000000000000000000000000000000
--- a/src/device/tests/test_gnmi.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging, os, sys, time
-from typing import Dict, Tuple
-os.environ['DEVICE_EMULATED_ONLY'] = 'YES'
-from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver # pylint: disable=wrong-import-position
-#from device.service.driver_api._Driver import (
-#    RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES
-#)
-
-logging.basicConfig(level=logging.DEBUG)
-LOGGER = logging.getLogger(__name__)
-LOGGER.setLevel(logging.DEBUG)
-
-# +---+---------------------------+--------------+---------------------------------+-------+---------+--------------------+--------------+
-# | # |           Name            | Container ID |              Image              | Kind  |  State  |    IPv4 Address    | IPv6 Address |
-# +---+---------------------------+--------------+---------------------------------+-------+---------+--------------------+--------------+
-# | 1 | clab-tfs-scenario-client1 | a8d48ec3265a | ghcr.io/hellt/network-multitool | linux | running | 172.100.100.201/24 | N/A          |
-# | 2 | clab-tfs-scenario-client2 | fc88436d2b32 | ghcr.io/hellt/network-multitool | linux | running | 172.100.100.202/24 | N/A          |
-# | 3 | clab-tfs-scenario-srl1    | b995b9bdadda | ghcr.io/nokia/srlinux           | srl   | running | 172.100.100.101/24 | N/A          |
-# | 4 | clab-tfs-scenario-srl2    | aacfc38cc376 | ghcr.io/nokia/srlinux           | srl   | running | 172.100.100.102/24 | N/A          |
-# +---+---------------------------+--------------+---------------------------------+-------+---------+--------------------+--------------+
-
-def interface(if_name, sif_index, ipv4_address, ipv4_prefix, enabled) -> Tuple[str, Dict]:
-    str_path = '/interface[{:s}]'.format(if_name)
-    str_data = {'name': if_name, 'enabled': enabled, 'sub_if_index': sif_index, 'sub_if_enabled': enabled,
-                'sub_if_ipv4_enabled': enabled, 'sub_if_ipv4_address': ipv4_address, 'sub_if_ipv4_prefix': ipv4_prefix}
-    return str_path, str_data
-
-def network_instance(ni_name, ni_type) -> Tuple[str, Dict]:
-    str_path = '/network_instance[{:s}]'.format(ni_name)
-    str_data = {'name': ni_name, 'type': ni_type}
-    return str_path, str_data
-
-def network_instance_static_route(ni_name, prefix, next_hop, next_hop_index=0) -> Tuple[str, Dict]:
-    str_path = '/network_instance[{:s}]/static_route[{:s}]'.format(ni_name, prefix)
-    str_data = {'name': ni_name, 'prefix': prefix, 'next_hop': next_hop, 'next_hop_index': next_hop_index}
-    return str_path, str_data
-
-def network_instance_interface(ni_name, if_name, sif_index) -> Tuple[str, Dict]:
-    str_path = '/network_instance[{:s}]/interface[{:s}.{:d}]'.format(ni_name, if_name, sif_index)
-    str_data = {'name': ni_name, 'if_name': if_name, 'sif_index': sif_index}
-    return str_path, str_data
-
-def main():
-    driver_settings = {
-        'protocol': 'gnmi',
-        'username': 'admin',
-        'password': 'NokiaSrl1!',
-        'use_tls' : True,
-    }
-    driver = GnmiOpenConfigDriver('172.100.100.102', 57400, **driver_settings)
-    driver.Connect()
-
-    #resources_to_get = []
-    #resources_to_get = [RESOURCE_ENDPOINTS]
-    #resources_to_get = [RESOURCE_INTERFACES]
-    #resources_to_get = [RESOURCE_NETWORK_INSTANCES]
-    #resources_to_get = [RESOURCE_ROUTING_POLICIES]
-    #resources_to_get = [RESOURCE_SERVICES]
-    #LOGGER.info('resources_to_get = {:s}'.format(str(resources_to_get)))
-    #results_getconfig = driver.GetConfig(resources_to_get)
-    #LOGGER.info('results_getconfig = {:s}'.format(str(results_getconfig)))
-
-    #resources_to_set = [
-    #    network_instance('test-svc', 'L3VRF'),
-    #
-    #    interface('ethernet-1/1', 0, '172.16.0.1', 24, True),
-    #    network_instance_interface('test-svc', 'ethernet-1/1', 0),
-    #
-    #    interface('ethernet-1/2', 0, '172.0.0.1', 24, True),
-    #    network_instance_interface('test-svc', 'ethernet-1/2', 0),
-    #
-    #    network_instance_static_route('test-svc', '172.0.0.0/24', '172.16.0.2'),
-    #    network_instance_static_route('test-svc', '172.2.0.0/24', '172.16.0.3'),
-    #]
-    #LOGGER.info('resources_to_set = {:s}'.format(str(resources_to_set)))
-    #results_setconfig = driver.SetConfig(resources_to_set)
-    #LOGGER.info('results_setconfig = {:s}'.format(str(results_setconfig)))
-
-    resources_to_delete = [
-        #network_instance_static_route('d35fc1d9', '172.0.0.0/24', '172.16.0.2'),
-        #network_instance_static_route('d35fc1d9', '172.2.0.0/24', '172.16.0.3'),
-    
-        #network_instance_interface('d35fc1d9', 'ethernet-1/1', 0),
-        #network_instance_interface('d35fc1d9', 'ethernet-1/2', 0),
-    
-        interface('ethernet-1/1', 0, '172.16.1.1', 24, True),
-        interface('ethernet-1/2', 0, '172.0.0.2', 24, True),
-    
-        network_instance('20f66fb5', 'L3VRF'),
-    ]
-    LOGGER.info('resources_to_delete = {:s}'.format(str(resources_to_delete)))
-    results_deleteconfig = driver.DeleteConfig(resources_to_delete)
-    LOGGER.info('results_deleteconfig = {:s}'.format(str(results_deleteconfig)))
-
-    time.sleep(1)
-
-    driver.Disconnect()
-    return 0
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/src/monitoring/service/EventTools.py b/src/monitoring/service/EventTools.py
index 7820f11c86e87f543087c88704572e1a169c6e7d..2ad31c9cb7df68e2b57d280aa5cdcf356376d8c6 100644
--- a/src/monitoring/service/EventTools.py
+++ b/src/monitoring/service/EventTools.py
@@ -108,12 +108,15 @@ class EventsDeviceCollector:
                         if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue
                         if config_rule.WhichOneof('config_rule') != 'custom': continue
                         str_resource_key = str(config_rule.custom.resource_key)
-                        if not str_resource_key.startswith('/interface['): continue
-                        json_resource_value = json.loads(config_rule.custom.resource_value)
-                        if 'name' not in json_resource_value: continue
-                        if 'enabled' not in json_resource_value: continue
-                        if not json_resource_value['enabled']: continue
-                        enabled_endpoint_names.add(json_resource_value['name'])
+                        if str_resource_key.startswith('/interface[') or str_resource_key.startswith('/endpoints/endpoint['):
+                            json_resource_value = json.loads(config_rule.custom.resource_value)
+                            if 'name' not in json_resource_value: continue
+                            if 'enabled' in json_resource_value:
+                                if not json_resource_value['enabled']: continue
+                                enabled_endpoint_names.add(json_resource_value['name'])
+                            if 'oper-status' in json_resource_value:
+                                if str(json_resource_value['oper-status']).upper() != 'UP': continue
+                                enabled_endpoint_names.add(json_resource_value['name'])
 
                     endpoints_monitored = self._device_endpoint_monitored.setdefault(device_uuid, dict())
                     for endpoint in device.device_endpoints:
@@ -127,7 +130,10 @@ class EventsDeviceCollector:
                         endpoint_was_monitored = endpoints_monitored.get(endpoint_uuid, False)
                         endpoint_is_enabled = (endpoint_name_or_uuid in enabled_endpoint_names)
 
-                        if not endpoint_was_monitored and endpoint_is_enabled:
+                        if not endpoint_was_monitored and not endpoint_is_enabled:
+                            # endpoint is idle, do nothing
+                            pass
+                        elif not endpoint_was_monitored and endpoint_is_enabled:
                             # activate
                             for value in endpoint.kpi_sample_types:
                                 if value == KPISAMPLETYPE_UNKNOWN: continue
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_l3vpn/Handlers.py b/src/nbi/service/rest_server/nbi_plugins/ietf_l3vpn/Handlers.py
index f7329cb35666f423e85f99510e5f89a82e89b7f8..1f399070aa2dbd0f9b22d32e15574b7bc38315ff 100644
--- a/src/nbi/service/rest_server/nbi_plugins/ietf_l3vpn/Handlers.py
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_l3vpn/Handlers.py
@@ -195,11 +195,17 @@ def process_site(site : Dict, errors : List[Dict]) -> None:
 
     # site_static_routing: (lan-range, lan-prefix-len, lan-tag) => next-hop
     site_static_routing : Dict[Tuple[str, str], str] = {}
-    for rt_proto in site['routing-protocols']['routing-protocol']:
+    site_routing_protocols : Dict = site.get('routing-protocols', dict())
+    site_routing_protocol : List = site_routing_protocols.get('routing-protocol', list())
+    for rt_proto in site_routing_protocol:
         if rt_proto['type'] != 'ietf-l3vpn-svc:static':
             MSG = 'Site Routing Protocol Type: {:s}'
             raise NotImplementedError(MSG.format(str(rt_proto['type'])))
-        for ipv4_rt in rt_proto['static']['cascaded-lan-prefixes']['ipv4-lan-prefixes']:
+        
+        rt_proto_static : Dict = rt_proto.get('static', dict())
+        rt_proto_static_clps : Dict = rt_proto_static.get('cascaded-lan-prefixes', dict())
+        rt_proto_static_clps_v4 = rt_proto_static_clps.get('ipv4-lan-prefixes', list())
+        for ipv4_rt in rt_proto_static_clps_v4:
             lan_range, lan_prefix = ipv4_rt['lan'].split('/')
             lan_prefix = int(lan_prefix)
             lan_tag   = int(ipv4_rt['lan-tag'].replace('vlan', ''))
diff --git a/src/nbi/service/rest_server/nbi_plugins/ietf_l3vpn/L3VPN_Service.py b/src/nbi/service/rest_server/nbi_plugins/ietf_l3vpn/L3VPN_Service.py
index a313677c12203c1621b920f3fcb7f6ff0c281bfb..47fb05d5571b440e867e776a753f556c3130b119 100644
--- a/src/nbi/service/rest_server/nbi_plugins/ietf_l3vpn/L3VPN_Service.py
+++ b/src/nbi/service/rest_server/nbi_plugins/ietf_l3vpn/L3VPN_Service.py
@@ -44,7 +44,7 @@ class L3VPN_Service(Resource):
 
             service_ready_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE
             service_status = target.service_status.service_status # pylint: disable=no-member
-            response = jsonify({})
+            response = jsonify({'service-id': target.service_id.service_uuid.uuid})
             response.status_code = HTTP_OK if service_status == service_ready_status else HTTP_GATEWAYTIMEOUT
         except Exception as e: # pylint: disable=broad-except
             LOGGER.exception('Something went wrong Retrieving VPN({:s})'.format(str(vpn_id)))
diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
index 3ed2b13fb33ae06faeacc4286959a8016ca995d1..3394d8df99933d8acc83a8cc8cebb1488ff6752c 100644
--- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
@@ -14,6 +14,7 @@
 
 import json, logging, requests, uuid
 from typing import Dict, List, Optional, Tuple, Union
+from common.DeviceTypes import DeviceTypeEnum
 from common.proto.context_pb2 import (
     ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum
 )
@@ -251,21 +252,37 @@ class _Algorithm:
                 ]
 
                 self.logger.debug('path_hops = {:s}'.format(str(path_hops)))
-                try:
-                    _device_dict = {k:v[0] for k,v in self.device_dict.items()}
-                    self.logger.debug('self.device_dict = {:s}'.format(str(_device_dict)))
-                    connections = convert_explicit_path_hops_to_connections(
-                        path_hops, self.device_dict, main_service_uuid, main_service_type)
-                    self.logger.debug('EXTRAPOLATED connections = {:s}'.format(str(connections)))
-                except: # pylint: disable=bare-except
-                    MSG = ' '.join([
-                        'Unable to Extrapolate sub-services and sub-connections.',
-                        'Assuming single-service and single-connection.',
-                    ])
-                    self.logger.exception(MSG)
+                device_types = {v[0]['device_type'] for k,v in self.device_dict.items()}
+                DEVICES_BASIC_CONNECTION = {
+                    DeviceTypeEnum.DATACENTER.value,    DeviceTypeEnum.EMULATED_DATACENTER.value,
+                    DeviceTypeEnum.CLIENT.value,        DeviceTypeEnum.EMULATED_CLIENT.value,
+                    DeviceTypeEnum.PACKET_ROUTER.value, DeviceTypeEnum.EMULATED_PACKET_ROUTER.value,
+                }
+                self.logger.debug('device_types = {:s}'.format(str(device_types)))
+                self.logger.debug('DEVICES_BASIC_CONNECTION = {:s}'.format(str(DEVICES_BASIC_CONNECTION)))
+                is_basic_connection = device_types.issubset(DEVICES_BASIC_CONNECTION)
+                self.logger.debug('is_basic_connection = {:s}'.format(str(is_basic_connection)))
+                if is_basic_connection:
+                    self.logger.info('Assuming basic connections...')
                     connections = convert_explicit_path_hops_to_plain_connection(
                         path_hops, main_service_uuid, main_service_type)
                     self.logger.debug('BASIC connections = {:s}'.format(str(connections)))
+                else:
+                    try:
+                        _device_dict = {k:v[0] for k,v in self.device_dict.items()}
+                        self.logger.debug('self.device_dict = {:s}'.format(str(_device_dict)))
+                        connections = convert_explicit_path_hops_to_connections(
+                            path_hops, self.device_dict, main_service_uuid, main_service_type)
+                        self.logger.debug('EXTRAPOLATED connections = {:s}'.format(str(connections)))
+                    except: # pylint: disable=bare-except
+                        MSG = ' '.join([
+                            'Unable to Extrapolate sub-services and sub-connections.',
+                            'Assuming single-service and single-connection.',
+                        ])
+                        self.logger.exception(MSG)
+                        connections = convert_explicit_path_hops_to_plain_connection(
+                            path_hops, main_service_uuid, main_service_type)
+                        self.logger.debug('BASIC connections = {:s}'.format(str(connections)))
 
                 for connection in connections:
                     service_uuid,service_type,path_hops,_ = connection
diff --git a/src/service/service/service_handler_api/SettingsHandler.py b/src/service/service/service_handler_api/SettingsHandler.py
index 293de54aa84be11f3c31bc1b47fce852df19a16a..24c5b638a35859b144969425d36ddad63a39d611 100644
--- a/src/service/service/service_handler_api/SettingsHandler.py
+++ b/src/service/service/service_handler_api/SettingsHandler.py
@@ -57,6 +57,11 @@ class SettingsHandler:
     def get(self, key_or_path : Union[str, List[str]], default : Optional[Any] = None) -> Optional[TreeNode]:
         return get_subnode(self.__resolver, self.__config, key_or_path, default=default)
 
+    def get_service_settings(self) -> Optional[TreeNode]:
+        service_settings_uri = '/settings'
+        service_settings = self.get(service_settings_uri)
+        return service_settings
+
     def get_device_settings(self, device : Device) -> Optional[TreeNode]:
         device_keys = device.device_id.device_uuid.uuid, device.name
 
diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py
index be314a8c1ee5112dd9d321dd2c1ee1dc6173aca4..777cc4588a6d3a1a3664f736faf03665efa50c79 100644
--- a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py
+++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py
@@ -12,33 +12,65 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, List, Optional, Tuple
-from common.proto.context_pb2 import Device, EndPoint
+import json, logging, netaddr, re
+from typing import Dict, List, Optional, Set, Tuple
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import ConfigActionEnum, Device, EndPoint, Service
 from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
-
 from service.service.service_handler_api.AnyTreeTools import TreeNode
 
-def _interface(if_name, sif_index, ipv4_address, ipv4_prefix, enabled) -> Tuple[str, Dict]:
-    str_path = '/interface[{:s}]'.format(if_name)
-    str_data = {'name': if_name, 'enabled': enabled, 'sub_if_index': sif_index,
-                'sub_if_enabled': enabled, 'sub_if_ipv4_enabled': enabled,
-                'sub_if_ipv4_address': ipv4_address, 'sub_if_ipv4_prefix': ipv4_prefix}
-    return str_path, str_data
+LOGGER = logging.getLogger(__name__)
+
+NETWORK_INSTANCE = 'teraflowsdn'
+
+RE_IF    = re.compile(r'^\/interface\[([^\]]+)\]$')
+RE_SUBIF = re.compile(r'^\/interface\[([^\]]+)\]\/subinterface\[([^\]]+)\]$')
+RE_SR    = re.compile(r'^\/network_instance\[([^\]]+)\]\/protocols\[STATIC\]/route\[([^\:]+)\:([^\]]+)\]$')
+
+def _interface(
+    interface : str, if_type : Optional[str] = 'l3ipvlan', index : int = 0, vlan_id : Optional[int] = None,
+    address_ip : Optional[str] = None, address_prefix : Optional[int] = None, mtu : Optional[int] = None,
+    enabled : bool = True
+) -> Tuple[str, Dict]:
+    path = '/interface[{:s}]/subinterface[{:d}]'.format(interface, index)
+    data = {'name': interface, 'index': index, 'enabled': enabled}
+    if if_type is not None: data['type'] = if_type
+    if vlan_id is not None: data['vlan_id'] = vlan_id
+    if address_ip is not None: data['address_ip'] = address_ip
+    if address_prefix is not None: data['address_prefix'] = address_prefix
+    if mtu is not None: data['mtu'] = mtu
+    return path, data
 
-def _network_instance(ni_name, ni_type) -> Tuple[str, Dict]:
-    str_path = '/network_instance[{:s}]'.format(ni_name)
-    str_data = {'name': ni_name, 'type': ni_type}
-    return str_path, str_data
+def _network_instance(ni_name : str, ni_type : str) -> Tuple[str, Dict]:
+    path = '/network_instance[{:s}]'.format(ni_name)
+    data = {'name': ni_name, 'type': ni_type}
+    return path, data
 
-def _network_instance_static_route(ni_name, prefix, next_hop, next_hop_index=0) -> Tuple[str, Dict]:
-    str_path = '/network_instance[{:s}]/static_route[{:s}]'.format(ni_name, prefix)
-    str_data = {'name': ni_name, 'prefix': prefix, 'next_hop': next_hop, 'next_hop_index': next_hop_index}
-    return str_path, str_data
+def _network_instance_protocol(ni_name : str, protocol : str) -> Tuple[str, Dict]:
+    path = '/network_instance[{:s}]/protocols[{:s}]'.format(ni_name, protocol)
+    data = {'name': ni_name, 'identifier': protocol, 'protocol_name': protocol}
+    return path, data
 
-def _network_instance_interface(ni_name, if_name, sif_index) -> Tuple[str, Dict]:
-    str_path = '/network_instance[{:s}]/interface[{:s}.{:d}]'.format(ni_name, if_name, sif_index)
-    str_data = {'name': ni_name, 'if_name': if_name, 'sif_index': sif_index}
-    return str_path, str_data
+def _network_instance_protocol_static(ni_name : str) -> Tuple[str, Dict]:
+    return _network_instance_protocol(ni_name, 'STATIC')
+
+def _network_instance_protocol_static_route(
+    ni_name : str, prefix : str, next_hop : str, metric : int
+) -> Tuple[str, Dict]:
+    protocol = 'STATIC'
+    path = '/network_instance[{:s}]/protocols[{:s}]/static_route[{:s}:{:d}]'.format(ni_name, protocol, prefix, metric)
+    index = 'AUTO_{:d}_{:s}'.format(metric, next_hop.replace('.', '-'))
+    data = {
+        'name': ni_name, 'identifier': protocol, 'protocol_name': protocol,
+        'prefix': prefix, 'index': index, 'next_hop': next_hop, 'metric': metric
+    }
+    return path, data
+
+def _network_instance_interface(ni_name : str, interface : str, sub_interface_index : int) -> Tuple[str, Dict]:
+    sub_interface_name = '{:s}.{:d}'.format(interface, sub_interface_index)
+    path = '/network_instance[{:s}]/interface[{:s}]'.format(ni_name, sub_interface_name)
+    data = {'name': ni_name, 'id': sub_interface_name, 'interface': interface, 'subinterface': sub_interface_index}
+    return path, data
 
 class EndpointComposer:
     def __init__(self, endpoint_uuid : str) -> None:
@@ -46,74 +78,201 @@ class EndpointComposer:
         self.objekt : Optional[EndPoint] = None
         self.sub_interface_index = 0
         self.ipv4_address = None
-        self.ipv4_prefix = None
+        self.ipv4_prefix_len = None
 
-    def configure(self, endpoint_obj : EndPoint, settings : Optional[TreeNode]) -> None:
-        self.objekt = endpoint_obj
+    def configure(self, endpoint_obj : Optional[EndPoint], settings : Optional[TreeNode]) -> None:
+        if endpoint_obj is not None:
+            self.objekt = endpoint_obj
         if settings is None: return
         json_settings : Dict = settings.value
-        self.ipv4_address = json_settings['ipv4_address']
-        self.ipv4_prefix = json_settings['ipv4_prefix']
-        self.sub_interface_index = json_settings['sub_interface_index']
+
+        if 'address_ip' in json_settings:
+            self.ipv4_address = json_settings['address_ip']
+        elif 'ip_address' in json_settings:
+            self.ipv4_address = json_settings['ip_address']
+        else:
+            MSG = 'IP Address not found. Tried: address_ip and ip_address. endpoint_obj={:s} settings={:s}'
+            LOGGER.warning(MSG.format(str(endpoint_obj), str(settings)))
+
+        if 'address_prefix' in json_settings:
+            self.ipv4_prefix_len = json_settings['address_prefix']
+        elif 'prefix_length' in json_settings:
+            self.ipv4_prefix_len = json_settings['prefix_length']
+        else:
+            MSG = 'IP Address Prefix not found. Tried: address_prefix and prefix_length. endpoint_obj={:s} settings={:s}'
+            LOGGER.warning(MSG.format(str(endpoint_obj), str(settings)))
+
+        self.sub_interface_index = json_settings.get('index', 0)
 
     def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]:
+        if self.ipv4_address is None: return []
+        if self.ipv4_prefix_len is None: return []
         json_config_rule = json_config_rule_delete if delete else json_config_rule_set
-        return [
-            json_config_rule(*_interface(
-                self.objekt.name, self.sub_interface_index, self.ipv4_address, self.ipv4_prefix, True
-            )),
+        config_rules = [
             json_config_rule(*_network_instance_interface(
                 network_instance_name, self.objekt.name, self.sub_interface_index
             )),
         ]
+        if not delete:
+            config_rules.extend([
+                json_config_rule(*_interface(
+                    self.objekt.name, index=self.sub_interface_index, address_ip=self.ipv4_address,
+                    address_prefix=self.ipv4_prefix_len, enabled=True
+                )),
+            ])
+        return config_rules
+
+    def dump(self) -> Dict:
+        return {
+            'index'         : self.sub_interface_index,
+            'address_ip'    : self.ipv4_address,
+            'address_prefix': self.ipv4_prefix_len,
+        }
 
 class DeviceComposer:
     def __init__(self, device_uuid : str) -> None:
         self.uuid = device_uuid
         self.objekt : Optional[Device] = None
-        self.endpoints : Dict[str, EndpointComposer] = dict()
-        self.static_routes : Dict[str, str] = dict()
-    
+        self.aliases : Dict[str, str] = dict() # endpoint_name => endpoint_uuid
+        self.endpoints : Dict[str, EndpointComposer] = dict() # endpoint_uuid => EndpointComposer
+        self.connected : Set[str] = set()
+        self.static_routes : Dict[str, Dict[int, str]] = dict() # {prefix => {metric => next_hop}}
+
+    def set_endpoint_alias(self, endpoint_name : str, endpoint_uuid : str) -> None:
+        self.aliases[endpoint_name] = endpoint_uuid
+
     def get_endpoint(self, endpoint_uuid : str) -> EndpointComposer:
+        endpoint_uuid = self.aliases.get(endpoint_uuid, endpoint_uuid)
         if endpoint_uuid not in self.endpoints:
             self.endpoints[endpoint_uuid] = EndpointComposer(endpoint_uuid)
         return self.endpoints[endpoint_uuid]
 
     def configure(self, device_obj : Device, settings : Optional[TreeNode]) -> None:
         self.objekt = device_obj
+        for endpoint_obj in device_obj.device_endpoints:
+            endpoint_uuid = endpoint_obj.endpoint_id.endpoint_uuid.uuid
+            self.set_endpoint_alias(endpoint_obj.name, endpoint_uuid)
+            self.get_endpoint(endpoint_obj.name).configure(endpoint_obj, None)
+
+        # Find management interfaces
+        mgmt_ifaces = set()
+        for config_rule in device_obj.device_config.config_rules:
+            if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            config_rule_custom = config_rule.custom
+            match = RE_IF.match(config_rule_custom.resource_key)
+            if match is None: continue
+            if_name = match.groups()[0]
+            resource_value = json.loads(config_rule_custom.resource_value)
+            management = resource_value.get('management', False)
+            if management: mgmt_ifaces.add(if_name)
+
+        # Find data plane interfaces
+        for config_rule in device_obj.device_config.config_rules:
+            if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue
+            if config_rule.WhichOneof('config_rule') != 'custom': continue
+            config_rule_custom = config_rule.custom
+
+            match = RE_SUBIF.match(config_rule_custom.resource_key)
+            if match is not None:
+                if_name, subif_index = match.groups()
+                if if_name in mgmt_ifaces: continue
+                resource_value = json.loads(config_rule_custom.resource_value)
+                if 'address_ip' not in resource_value: continue
+                if 'address_prefix' not in resource_value: continue
+                ipv4_network    = str(resource_value['address_ip'])
+                ipv4_prefix_len = int(resource_value['address_prefix'])
+                endpoint = self.get_endpoint(if_name)
+                endpoint.ipv4_address = ipv4_network
+                endpoint.ipv4_prefix_len = ipv4_prefix_len
+                endpoint.sub_interface_index = int(subif_index)
+                endpoint_ip_network = netaddr.IPNetwork('{:s}/{:d}'.format(ipv4_network, ipv4_prefix_len))
+                self.connected.add(str(endpoint_ip_network.cidr))
+
+            match = RE_SR.match(config_rule_custom.resource_key)
+            if match is not None:
+                ni_name, prefix, metric = match.groups()
+                if ni_name != NETWORK_INSTANCE: continue
+                resource_value : Dict = json.loads(config_rule_custom.resource_value)
+                next_hop = resource_value['next_hop']
+                self.static_routes.setdefault(prefix, dict())[int(metric)] = next_hop
+
         if settings is None: return
         json_settings : Dict = settings.value
-        static_routes = json_settings.get('static_routes', [])
+        static_routes : List[Dict] = json_settings.get('static_routes', [])
         for static_route in static_routes:
             prefix   = static_route['prefix']
             next_hop = static_route['next_hop']
-            self.static_routes[prefix] = next_hop
+            metric   = static_route.get('metric', 0)
+            self.static_routes.setdefault(prefix, dict())[metric] = next_hop
 
     def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]:
+        SELECTED_DEVICES = {DeviceTypeEnum.PACKET_ROUTER.value, DeviceTypeEnum.EMULATED_PACKET_ROUTER.value}
+        if self.objekt.device_type not in SELECTED_DEVICES: return []
+
         json_config_rule = json_config_rule_delete if delete else json_config_rule_set
         config_rules = [
             json_config_rule(*_network_instance(network_instance_name, 'L3VRF'))
         ]
         for endpoint in self.endpoints.values():
             config_rules.extend(endpoint.get_config_rules(network_instance_name, delete=delete))
-        for prefix, next_hop in self.static_routes.items():
+        if len(self.static_routes) > 0:
             config_rules.append(
-                json_config_rule(*_network_instance_static_route(network_instance_name, prefix, next_hop))
+                json_config_rule(*_network_instance_protocol_static(network_instance_name))
             )
+        for prefix, metric_next_hop in self.static_routes.items():
+            for metric, next_hop in metric_next_hop.items():
+                config_rules.append(
+                    json_config_rule(*_network_instance_protocol_static_route(
+                        network_instance_name, prefix, next_hop, metric
+                    ))
+                )
         if delete: config_rules = list(reversed(config_rules))
         return config_rules
 
+    def dump(self) -> Dict:
+        return {
+            'endpoints' : {
+                endpoint_uuid : endpoint.dump()
+                for endpoint_uuid, endpoint in self.endpoints.items()
+            },
+            'connected' : list(self.connected),
+            'static_routes' : self.static_routes,
+        }
+
 class ConfigRuleComposer:
     def __init__(self) -> None:
-        self.devices : Dict[str, DeviceComposer] = dict()
+        self.objekt : Optional[Service] = None
+        self.aliases : Dict[str, str] = dict() # device_name => device_uuid
+        self.devices : Dict[str, DeviceComposer] = dict() # device_uuid => DeviceComposer
+
+    def set_device_alias(self, device_name : str, device_uuid : str) -> None:
+        self.aliases[device_name] = device_uuid
 
     def get_device(self, device_uuid : str) -> DeviceComposer:
+        device_uuid = self.aliases.get(device_uuid, device_uuid)
         if device_uuid not in self.devices:
             self.devices[device_uuid] = DeviceComposer(device_uuid)
         return self.devices[device_uuid]
 
-    def get_config_rules(self, network_instance_name : str, delete : bool = False) -> Dict[str, List[Dict]]:
+    def configure(self, service_obj : Service, settings : Optional[TreeNode]) -> None:
+        self.objekt = service_obj
+        if settings is None: return
+        #json_settings : Dict = settings.value
+        # For future use
+
+    def get_config_rules(
+        self, network_instance_name : str = NETWORK_INSTANCE, delete : bool = False
+    ) -> Dict[str, List[Dict]]:
         return {
             device_uuid : device.get_config_rules(network_instance_name, delete=delete)
             for device_uuid, device in self.devices.items()
         }
+
+    def dump(self) -> Dict:
+        return {
+            'devices' : {
+                device_uuid : device.dump()
+                for device_uuid, device in self.devices.items()
+            }
+        }
diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py
index 5856b5f61893174a92ce02a303ae9ad30be16005..88bb5655b872dfce90988686e7d8bc242d866bf0 100644
--- a/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py
+++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py
@@ -15,14 +15,17 @@
 import json, logging
 from typing import Any, Dict, List, Optional, Tuple, Union
 from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
-from common.proto.context_pb2 import ConfigRule, DeviceId, Service
+from common.proto.context_pb2 import ConfigRule, ConnectionId, DeviceId, Service
+from common.tools.object_factory.Connection import json_connection_id
 from common.tools.object_factory.Device import json_device_id
 from common.type_checkers.Checkers import chk_type
-from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
 from service.service.service_handler_api._ServiceHandler import _ServiceHandler
 from service.service.service_handler_api.SettingsHandler import SettingsHandler
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
+from service.service.tools.EndpointIdFormatters import endpointids_to_raw
 from .ConfigRuleComposer import ConfigRuleComposer
+from .StaticRouteGenerator import StaticRouteGenerator
 
 LOGGER = logging.getLogger(__name__)
 
@@ -35,24 +38,35 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
         self.__service = service
         self.__task_executor = task_executor
         self.__settings_handler = SettingsHandler(service.service_config, **settings)
-        self.__composer = ConfigRuleComposer()
-        self.__endpoint_map : Dict[Tuple[str, str], str] = dict()
+        self.__config_rule_composer = ConfigRuleComposer()
+        self.__static_route_generator = StaticRouteGenerator(self.__config_rule_composer)
+        self.__endpoint_map : Dict[Tuple[str, str], Tuple[str, str]] = dict()
 
     def _compose_config_rules(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> None:
+        if len(endpoints) % 2 != 0: raise Exception('Number of endpoints should be even')
+
+        service_settings = self.__settings_handler.get_service_settings()
+        self.__config_rule_composer.configure(self.__service, service_settings)
+
         for endpoint in endpoints:
             device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
 
             device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
             device_settings = self.__settings_handler.get_device_settings(device_obj)
-            _device = self.__composer.get_device(device_obj.name)
+            self.__config_rule_composer.set_device_alias(device_obj.name, device_uuid)
+            _device = self.__config_rule_composer.get_device(device_obj.name)
             _device.configure(device_obj, device_settings)
 
             endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
             endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+            _device.set_endpoint_alias(endpoint_obj.name, endpoint_uuid)
             _endpoint = _device.get_endpoint(endpoint_obj.name)
             _endpoint.configure(endpoint_obj, endpoint_settings)
 
-            self.__endpoint_map[(device_uuid, endpoint_uuid)] = device_obj.name
+            self.__endpoint_map[(device_uuid, endpoint_uuid)] = (device_obj.name, endpoint_obj.name)
+
+        self.__static_route_generator.compose(endpoints)
+        LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
 
     def _do_configurations(
         self, config_rules_per_device : Dict[str, List[Dict]], endpoints : List[Tuple[str, str, Optional[str]]],
@@ -62,7 +76,7 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
         results_per_device = dict()
         for device_name,json_config_rules in config_rules_per_device.items():
             try:
-                device_obj = self.__composer.get_device(device_name).objekt
+                device_obj = self.__config_rule_composer.get_device(device_name).objekt
                 if len(json_config_rules) == 0: continue
                 del device_obj.device_config.config_rules[:]
                 for json_config_rule in json_config_rules:
@@ -78,7 +92,8 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
         results = []
         for endpoint in endpoints:
             device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
-            device_name = self.__endpoint_map[(device_uuid, endpoint_uuid)]
+            device_name, _ = self.__endpoint_map[(device_uuid, endpoint_uuid)]
+            if device_name not in results_per_device: continue
             results.append(results_per_device[device_name])
         return results
 
@@ -88,12 +103,16 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
     ) -> List[Union[bool, Exception]]:
         chk_type('endpoints', endpoints, list)
         if len(endpoints) == 0: return []
-        service_uuid = self.__service.service_id.service_uuid.uuid
-        #settings = self.__settings_handler.get('/settings')
-        self._compose_config_rules(endpoints)
-        network_instance_name = service_uuid.split('-')[0]
-        config_rules_per_device = self.__composer.get_config_rules(network_instance_name, delete=False)
+        #service_uuid = self.__service.service_id.service_uuid.uuid
+        connection = self.__task_executor.get_connection(ConnectionId(**json_connection_id(connection_uuid)))
+        connection_endpoint_ids = endpointids_to_raw(connection.path_hops_endpoint_ids)
+        self._compose_config_rules(connection_endpoint_ids)
+        #network_instance_name = service_uuid.split('-')[0]
+        #config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=False)
+        config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=False)
+        LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
         results = self._do_configurations(config_rules_per_device, endpoints)
+        LOGGER.debug('results={:s}'.format(str(results)))
         return results
 
     @metered_subclass_method(METRICS_POOL)
@@ -102,12 +121,16 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
     ) -> List[Union[bool, Exception]]:
         chk_type('endpoints', endpoints, list)
         if len(endpoints) == 0: return []
-        service_uuid = self.__service.service_id.service_uuid.uuid
-        #settings = self.__settings_handler.get('/settings')
-        self._compose_config_rules(endpoints)
-        network_instance_name = service_uuid.split('-')[0]
-        config_rules_per_device = self.__composer.get_config_rules(network_instance_name, delete=True)
+        #service_uuid = self.__service.service_id.service_uuid.uuid
+        connection = self.__task_executor.get_connection(ConnectionId(**json_connection_id(connection_uuid)))
+        connection_endpoint_ids = endpointids_to_raw(connection.path_hops_endpoint_ids)
+        self._compose_config_rules(connection_endpoint_ids)
+        #network_instance_name = service_uuid.split('-')[0]
+        #config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=True)
+        config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=True)
+        LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
         results = self._do_configurations(config_rules_per_device, endpoints, delete=True)
+        LOGGER.debug('results={:s}'.format(str(results)))
         return results
 
     @metered_subclass_method(METRICS_POOL)
diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/StaticRouteGenerator.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/StaticRouteGenerator.py
new file mode 100644
index 0000000000000000000000000000000000000000..a16e4d5b1c46da600b9dc078ca9f6a74c7eaa187
--- /dev/null
+++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/StaticRouteGenerator.py
@@ -0,0 +1,197 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, netaddr, sys
+from typing import List, Optional, Tuple
+from .ConfigRuleComposer import ConfigRuleComposer
+
+LOGGER = logging.getLogger(__name__)
+
+# Used to infer routing networks for adjacent ports when there is no hint in device/endpoint settings
+ROOT_NEIGHBOR_ROUTING_NETWORK = netaddr.IPNetwork('10.254.254.0/16')
+NEIGHBOR_ROUTING_NETWORKS_PREFIX_LEN = 30
+NEIGHBOR_ROUTING_NETWORKS = set(ROOT_NEIGHBOR_ROUTING_NETWORK.subnet(NEIGHBOR_ROUTING_NETWORKS_PREFIX_LEN))
+
+def _generate_neighbor_addresses() -> Tuple[netaddr.IPAddress, netaddr.IPAddress, int]:
+    # Pop an unused /30 and return its two host addresses plus the prefix length as a real tuple
+    ip_network = NEIGHBOR_ROUTING_NETWORKS.pop()
+    ip_endpoint_a, ip_endpoint_b = list(ip_network.iter_hosts())[0:2]
+    return ip_endpoint_a, ip_endpoint_b, NEIGHBOR_ROUTING_NETWORKS_PREFIX_LEN
+
+def _compute_gateway(ip_network : netaddr.IPNetwork, gateway_host=1) -> netaddr.IPAddress:
+    return netaddr.IPAddress(ip_network.cidr.first + gateway_host)
+
+def _compose_ipv4_network(ipv4_network, ipv4_prefix_len) -> netaddr.IPNetwork:
+    return netaddr.IPNetwork('{:s}/{:d}'.format(str(ipv4_network), int(ipv4_prefix_len)))
+
+class StaticRouteGenerator:
+    def __init__(self, config_rule_composer : ConfigRuleComposer) -> None:
+        self._config_rule_composer = config_rule_composer
+
+    def compose(self, connection_hop_list : List[Tuple[str, str, Optional[str]]]) -> None:
+        link_endpoints = self._compute_link_endpoints(connection_hop_list)
+        LOGGER.debug('link_endpoints = {:s}'.format(str(link_endpoints)))
+
+        self._compute_link_addresses(link_endpoints)
+        LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump())))
+
+        self._discover_connected_networks(connection_hop_list)
+        LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump())))
+
+        # Compute and propagate static routes forward (service_endpoint_a => service_endpoint_b)
+        self._compute_static_routes(link_endpoints)
+
+        # Compute and propagate static routes backward (service_endpoint_b => service_endpoint_a)
+        reversed_endpoints = list(reversed(connection_hop_list))
+        reversed_link_endpoints = self._compute_link_endpoints(reversed_endpoints)
+        LOGGER.debug('reversed_link_endpoints = {:s}'.format(str(reversed_link_endpoints)))
+        self._compute_static_routes(reversed_link_endpoints)
+
+        LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump())))
+
+    def _compute_link_endpoints(
+        self, connection_hop_list : List[Tuple[str, str, Optional[str]]]
+    ) -> List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]]:
+        num_connection_hops = len(connection_hop_list)
+        if num_connection_hops % 2 != 0: raise Exception('Number of connection hops must be even')
+        if num_connection_hops < 4: raise Exception('Number of connection hops must be >= 4')
+
+        # Skip service endpoints (first and last)
+        it_connection_hops = iter(connection_hop_list[1:-1])
+        return list(zip(it_connection_hops, it_connection_hops))
+
+    def _compute_link_addresses(
+        self, link_endpoints_list : List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]]
+    ) -> None:
+        for link_endpoints in link_endpoints_list:
+            device_endpoint_a, device_endpoint_b = link_endpoints
+
+            device_uuid_a, endpoint_uuid_a = device_endpoint_a[0:2]
+            endpoint_a = self._config_rule_composer.get_device(device_uuid_a).get_endpoint(endpoint_uuid_a)
+
+            device_uuid_b, endpoint_uuid_b = device_endpoint_b[0:2]
+            endpoint_b = self._config_rule_composer.get_device(device_uuid_b).get_endpoint(endpoint_uuid_b)
+
+            if endpoint_a.ipv4_address is None and endpoint_b.ipv4_address is None:
+                ip_endpoint_a, ip_endpoint_b, prefix_len = _generate_neighbor_addresses()
+                endpoint_a.ipv4_address    = str(ip_endpoint_a)
+                endpoint_a.ipv4_prefix_len = prefix_len
+                endpoint_b.ipv4_address    = str(ip_endpoint_b)
+                endpoint_b.ipv4_prefix_len = prefix_len
+            elif endpoint_a.ipv4_address is not None and endpoint_b.ipv4_address is None:
+                prefix_len = endpoint_a.ipv4_prefix_len
+                ip_network_a = _compose_ipv4_network(endpoint_a.ipv4_address, prefix_len)
+                if prefix_len > 30:
+                    MSG = 'Unsupported prefix_len for {:s}: {:s}'
+                    raise Exception(MSG.format(str(endpoint_a), str(prefix_len)))
+                ip_endpoint_b = _compute_gateway(ip_network_a, gateway_host=1)
+                if ip_endpoint_b == ip_network_a.ip:
+                    ip_endpoint_b = _compute_gateway(ip_network_a, gateway_host=2)
+                endpoint_b.ipv4_address    = str(ip_endpoint_b)
+                endpoint_b.ipv4_prefix_len = prefix_len
+            elif endpoint_a.ipv4_address is None and endpoint_b.ipv4_address is not None:
+                prefix_len = endpoint_b.ipv4_prefix_len
+                ip_network_b = _compose_ipv4_network(endpoint_b.ipv4_address, prefix_len)
+                if prefix_len > 30:
+                    MSG = 'Unsupported prefix_len for {:s}: {:s}'
+                    raise Exception(MSG.format(str(endpoint_b), str(prefix_len)))
+                ip_endpoint_a = _compute_gateway(ip_network_b, gateway_host=1)
+                if ip_endpoint_a == ip_network_b.ip:
+                    ip_endpoint_a = _compute_gateway(ip_network_b, gateway_host=2)
+                endpoint_a.ipv4_address    = str(ip_endpoint_a)
+                endpoint_a.ipv4_prefix_len = prefix_len
+            elif endpoint_a.ipv4_address is not None and endpoint_b.ipv4_address is not None:
+                ip_network_a = _compose_ipv4_network(endpoint_a.ipv4_address, endpoint_a.ipv4_prefix_len)
+                ip_network_b = _compose_ipv4_network(endpoint_b.ipv4_address, endpoint_b.ipv4_prefix_len)
+                if ip_network_a.cidr != ip_network_b.cidr:
+                    MSG = 'Incompatible CIDRs: endpoint_a({:s})=>{:s} endpoint_b({:s})=>{:s}'
+                    raise Exception(MSG.format(str(endpoint_a), str(ip_network_a), str(endpoint_b), str(ip_network_b)))
+                if ip_network_a.ip == ip_network_b.ip:
+                    MSG = 'Duplicated IP: endpoint_a({:s})=>{:s} endpoint_b({:s})=>{:s}'
+                    raise Exception(MSG.format(str(endpoint_a), str(ip_network_a), str(endpoint_b), str(ip_network_b)))
+
+    def _discover_connected_networks(self, connection_hop_list : List[Tuple[str, str, Optional[str]]]) -> None:
+        for connection_hop in connection_hop_list:
+            device_uuid, endpoint_uuid = connection_hop[0:2]
+            device = self._config_rule_composer.get_device(device_uuid)
+            endpoint = device.get_endpoint(endpoint_uuid)
+
+            if endpoint.ipv4_address is None: continue
+            ip_network = _compose_ipv4_network(endpoint.ipv4_address, endpoint.ipv4_prefix_len)
+
+            device.connected.add(str(ip_network.cidr))
+
+    def _compute_static_routes(
+        self, link_endpoints_list : List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]]
+    ) -> None:
+        for link_endpoints in link_endpoints_list:
+            device_endpoint_a, device_endpoint_b = link_endpoints
+
+            device_uuid_a, endpoint_uuid_a = device_endpoint_a[0:2]
+            device_a   = self._config_rule_composer.get_device(device_uuid_a)
+            endpoint_a = device_a.get_endpoint(endpoint_uuid_a)
+
+            device_uuid_b, endpoint_uuid_b = device_endpoint_b[0:2]
+            device_b   = self._config_rule_composer.get_device(device_uuid_b)
+            endpoint_b = device_b.get_endpoint(endpoint_uuid_b)
+
+            # Compute static routes from networks connected in device_a
+            for ip_network_a in device_a.connected:
+                if ip_network_a in device_b.connected: continue
+                if ip_network_a in device_b.static_routes: continue
+                if ip_network_a in ROOT_NEIGHBOR_ROUTING_NETWORK: continue
+                endpoint_a_ip_network = _compose_ipv4_network(endpoint_a.ipv4_address, endpoint_a.ipv4_prefix_len)
+                next_hop = str(endpoint_a_ip_network.ip)
+                metric = 1
+                device_b.static_routes.setdefault(ip_network_a, dict())[metric] = next_hop
+
+            # Compute static routes from networks connected in device_b
+            for ip_network_b in device_b.connected:
+                if ip_network_b in device_a.connected: continue
+                if ip_network_b in device_a.static_routes: continue
+                if ip_network_b in ROOT_NEIGHBOR_ROUTING_NETWORK: continue
+                endpoint_b_ip_network = _compose_ipv4_network(endpoint_b.ipv4_address, endpoint_b.ipv4_prefix_len)
+                next_hop = str(endpoint_b_ip_network.ip)
+                metric = 1
+                device_a.static_routes.setdefault(ip_network_b, dict())[metric] = next_hop
+
+            # Propagate static routes already known by device_a into device_b.
+            for ip_network_a, metric_next_hop in device_a.static_routes.items():
+                if ip_network_a in device_b.connected: continue
+                if ip_network_a in ROOT_NEIGHBOR_ROUTING_NETWORK: continue
+                endpoint_a_ip_network = _compose_ipv4_network(endpoint_a.ipv4_address, endpoint_a.ipv4_prefix_len)
+                if ip_network_a in device_b.static_routes:
+                    current_metric = min(device_b.static_routes[ip_network_a].keys())
+                else:
+                    current_metric = int(sys.float_info.max)
+                for metric in metric_next_hop.keys():
+                    new_metric = metric + 1
+                    if new_metric >= current_metric: continue
+                    next_hop_a = str(endpoint_a_ip_network.ip)
+                    device_b.static_routes.setdefault(ip_network_a, dict())[new_metric] = next_hop_a
+
+            # Propagate static routes already known by device_b into device_a.
+            for ip_network_b, metric_next_hop in device_b.static_routes.items():
+                if ip_network_b in device_a.connected: continue
+                if ip_network_b in ROOT_NEIGHBOR_ROUTING_NETWORK: continue
+                endpoint_b_ip_network = _compose_ipv4_network(endpoint_b.ipv4_address, endpoint_b.ipv4_prefix_len)
+                if ip_network_b in device_a.static_routes:
+                    current_metric = min(device_a.static_routes[ip_network_b].keys())
+                else:
+                    current_metric = int(sys.float_info.max)
+                for metric in metric_next_hop.keys():
+                    new_metric = metric + 1
+                    if new_metric >= current_metric: continue
+                    next_hop_b = str(endpoint_b_ip_network.ip)
+                    device_a.static_routes.setdefault(ip_network_b, dict())[new_metric] = next_hop_b
diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py
index 67f6a516c9bc3031b5cdd2aed80cd6fdd7e1c9c2..70d17027eb48987faef562118bf4fb42dfb9c399 100644
--- a/src/service/service/task_scheduler/TaskExecutor.py
+++ b/src/service/service/task_scheduler/TaskExecutor.py
@@ -14,7 +14,8 @@
 
 import json, logging
 from enum import Enum
-from typing import TYPE_CHECKING, Any, Dict, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
+from common.DeviceTypes import DeviceTypeEnum
 from common.method_wrappers.ServiceExceptions import NotFoundException
 from typing import List
 from common.proto.qkd_app_pb2 import QKDAppStatusEnum
@@ -166,8 +167,8 @@ class TaskExecutor:
 
     def get_devices_from_connection(
         self, connection : Connection, exclude_managed_by_controller : bool = False
-    ) -> Dict[str, Device]:
-        devices = dict()
+    ) -> Dict[DeviceTypeEnum, Dict[str, Device]]:
+        devices : Dict[DeviceTypeEnum, Dict[str, Device]] = dict()
         for endpoint_id in connection.path_hops_endpoint_ids:
             device = self.get_device(endpoint_id.device_id)
             device_uuid = endpoint_id.device_id.device_uuid.uuid
@@ -175,11 +176,14 @@ class TaskExecutor:
 
             controller = self.get_device_controller(device)
             if controller is None:
-                devices[device_uuid] = device
+                device_type = DeviceTypeEnum._value2member_map_[device.device_type]
+                devices.setdefault(device_type, dict())[device_uuid] = device
             else:
                 if not exclude_managed_by_controller:
-                    devices[device_uuid] = device
-                devices[controller.device_id.device_uuid.uuid] = controller
+                    device_type = DeviceTypeEnum._value2member_map_[device.device_type]
+                    devices.setdefault(device_type, dict())[device_uuid] = device
+                device_type = DeviceTypeEnum._value2member_map_[controller.device_type]
+                devices.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller
         return devices
 
     # ----- Service-related methods ------------------------------------------------------------------------------------
@@ -206,28 +210,36 @@ class TaskExecutor:
 
     # ----- Service Handler Factory ------------------------------------------------------------------------------------
 
-    def get_service_handler(
+    def get_service_handlers(
         self, connection : Connection, service : Service, **service_handler_settings
-    ) -> '_ServiceHandler':
-        connection_devices = self.get_devices_from_connection(connection, exclude_managed_by_controller=True)
-        try:
-            service_handler_class = get_service_handler_class(
-                self._service_handler_factory, service, connection_devices)
-            return service_handler_class(service, self, **service_handler_settings)
-        except (UnsatisfiedFilterException, UnsupportedFilterFieldException, UnsupportedFilterFieldValueException):
-            dict_connection_devices = {
-                cd_data.name : (cd_uuid, cd_data.name, {
-                    (device_driver, DeviceDriverEnum.Name(device_driver))
-                    for device_driver in cd_data.device_drivers
-                })
-                for cd_uuid,cd_data in connection_devices.items()
-            }
-            LOGGER.exception(
-                'Unable to select service handler. service={:s} connection={:s} connection_devices={:s}'.format(
+    ) -> Dict[DeviceTypeEnum, Tuple['_ServiceHandler', Dict[str, Device]]]:
+        connection_device_types : Dict[DeviceTypeEnum, Dict[str, Device]] = self.get_devices_from_connection(
+            connection, exclude_managed_by_controller=True
+        )
+        service_handlers : Dict[DeviceTypeEnum, Tuple['_ServiceHandler', Dict[str, Device]]] = dict()
+        for device_type, connection_devices in connection_device_types.items():
+            try:
+                service_handler_class = get_service_handler_class(
+                    self._service_handler_factory, service, connection_devices)
+                service_handler = service_handler_class(service, self, **service_handler_settings)
+                service_handlers[device_type] = (service_handler, connection_devices)
+            except (
+                UnsatisfiedFilterException, UnsupportedFilterFieldException,
+                UnsupportedFilterFieldValueException
+            ):
+                dict_connection_devices = {
+                    cd_data.name : (cd_uuid, cd_data.name, {
+                        (device_driver, DeviceDriverEnum.Name(device_driver))
+                        for device_driver in cd_data.device_drivers
+                    })
+                    for cd_uuid,cd_data in connection_devices.items()
+                }
+                MSG = 'Unable to select service handler. service={:s} connection={:s} connection_devices={:s}'
+                LOGGER.exception(MSG.format(
                     grpc_message_to_json_string(service), grpc_message_to_json_string(connection),
                     str(dict_connection_devices)
-                )
-            )
+                ))
+        return service_handlers
 
 
     # ----- QkdApp-related methods -------------------------------------------------------------------------------------
diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
index f6c543c1ccb947eb01c3d5f5fb93c0504a77ca95..3f52f337ae163a6e8c78d873a58a291ecff4bd1a 100644
--- a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
+++ b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py
@@ -12,8 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import TYPE_CHECKING, Dict, Tuple
+from common.DeviceTypes import DeviceTypeEnum
 from common.method_wrappers.ServiceExceptions import OperationFailedException
-from common.proto.context_pb2 import ConnectionId
+from common.proto.context_pb2 import ConnectionId, Device
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from service.service.service_handler_api.Tools import check_errors_setendpoint
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
@@ -21,6 +23,9 @@ from service.service.tools.EndpointIdFormatters import endpointids_to_raw
 from service.service.tools.ObjectKeys import get_connection_key
 from ._Task import _Task
 
+if TYPE_CHECKING:
+    from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+
 KEY_TEMPLATE = 'connection({connection_id:s}):configure'
 
 class Task_ConnectionConfigure(_Task):
@@ -44,12 +49,24 @@ class Task_ConnectionConfigure(_Task):
         service = self._task_executor.get_service(connection.service_id)
 
         service_handler_settings = {}
-        service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings)
+        service_handlers : Dict[DeviceTypeEnum, Tuple['_ServiceHandler', Dict[str, Device]]] = \
+            self._task_executor.get_service_handlers(connection, service, **service_handler_settings)
 
-        endpointids_to_set = endpointids_to_raw(connection.path_hops_endpoint_ids)
         connection_uuid = connection.connection_id.connection_uuid.uuid
-        results_setendpoint = service_handler.SetEndpoint(endpointids_to_set, connection_uuid=connection_uuid)
-        errors = check_errors_setendpoint(endpointids_to_set, results_setendpoint)
+        endpointids_to_set = endpointids_to_raw(connection.path_hops_endpoint_ids)
+
+        errors = list()
+        for _, (service_handler, connection_devices) in service_handlers.items():
+            _endpointids_to_set = [
+                (device_uuid, endpoint_uuid, topology_uuid)
+                for device_uuid, endpoint_uuid, topology_uuid in endpointids_to_set
+                if device_uuid in connection_devices
+            ]
+            results_setendpoint = service_handler.SetEndpoint(
+                _endpointids_to_set, connection_uuid=connection_uuid
+            )
+            errors.extend(check_errors_setendpoint(_endpointids_to_set, results_setendpoint))
+
         if len(errors) > 0:
             MSG = 'SetEndpoint for Connection({:s}) from Service({:s})'
             str_connection = grpc_message_to_json_string(connection)
diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
index 7b6b7951befbd6abd4d052ce5eec39d3398aa6e7..4ce774d208d3ea71e55482ea0653521cb7f1083a 100644
--- a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
+++ b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py
@@ -12,8 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import TYPE_CHECKING, Dict, Tuple
+from common.DeviceTypes import DeviceTypeEnum
 from common.method_wrappers.ServiceExceptions import OperationFailedException
-from common.proto.context_pb2 import ConnectionId
+from common.proto.context_pb2 import ConnectionId, Device
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from service.service.service_handler_api.Tools import check_errors_deleteendpoint
 from service.service.task_scheduler.TaskExecutor import TaskExecutor
@@ -21,6 +23,9 @@ from service.service.tools.EndpointIdFormatters import endpointids_to_raw
 from service.service.tools.ObjectKeys import get_connection_key
 from ._Task import _Task
 
+if TYPE_CHECKING:
+    from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+
 KEY_TEMPLATE = 'connection({connection_id:s}):deconfigure'
 
 class Task_ConnectionDeconfigure(_Task):
@@ -44,12 +49,24 @@ class Task_ConnectionDeconfigure(_Task):
         service = self._task_executor.get_service(connection.service_id)
 
         service_handler_settings = {}
-        service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings)
+        service_handlers : Dict[DeviceTypeEnum, Tuple['_ServiceHandler', Dict[str, Device]]] = \
+            self._task_executor.get_service_handlers(connection, service, **service_handler_settings)
 
-        endpointids_to_delete = endpointids_to_raw(connection.path_hops_endpoint_ids)
         connection_uuid = connection.connection_id.connection_uuid.uuid
-        results_deleteendpoint = service_handler.DeleteEndpoint(endpointids_to_delete, connection_uuid=connection_uuid)
-        errors = check_errors_deleteendpoint(endpointids_to_delete, results_deleteendpoint)
+        endpointids_to_delete = endpointids_to_raw(connection.path_hops_endpoint_ids)
+
+        errors = list()
+        for _, (service_handler, connection_devices) in service_handlers.items():
+            _endpointids_to_delete = [
+                (device_uuid, endpoint_uuid, topology_uuid)
+                for device_uuid, endpoint_uuid, topology_uuid in endpointids_to_delete
+                if device_uuid in connection_devices
+            ]
+            results_deleteendpoint = service_handler.DeleteEndpoint(
+                _endpointids_to_delete, connection_uuid=connection_uuid
+            )
+            errors.extend(check_errors_deleteendpoint(_endpointids_to_delete, results_deleteendpoint))
+
         if len(errors) > 0:
             MSG = 'DeleteEndpoint for Connection({:s}) from Service({:s})'
             str_connection = grpc_message_to_json_string(connection)
diff --git a/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py b/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b3f76566c9d8e5b2c8bdfb05f4b2448c29b7eae
--- /dev/null
+++ b/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py
@@ -0,0 +1,160 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging
+from typing import Any, Dict, List, Optional, Tuple, Union
+from common.proto.context_pb2 import ConfigRule, DeviceId, Service
+from common.tools.object_factory.Device import json_device_id
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+from service.service.service_handler_api.SettingsHandler import SettingsHandler
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
+from .MockTaskExecutor import MockTaskExecutor
+from service.service.service_handlers.l3nm_gnmi_openconfig.ConfigRuleComposer import ConfigRuleComposer
+from service.service.service_handlers.l3nm_gnmi_openconfig.StaticRouteGenerator import StaticRouteGenerator
+
+LOGGER = logging.getLogger(__name__)
+
+class MockServiceHandler(_ServiceHandler):
+    def __init__(   # pylint: disable=super-init-not-called
+        self, service : Service, task_executor : MockTaskExecutor, **settings
+    ) -> None:
+        self.__service = service
+        self.__task_executor = task_executor
+        self.__settings_handler = SettingsHandler(service.service_config, **settings)
+        self.__config_rule_composer = ConfigRuleComposer()
+        self.__static_route_generator = StaticRouteGenerator(self.__config_rule_composer)
+        self.__endpoint_map : Dict[Tuple[str, str], Tuple[str, str]] = dict()
+
+    def _compose_config_rules(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> None:
+        if len(endpoints) % 2 != 0: raise Exception('Number of endpoints should be even')
+
+        service_settings = self.__settings_handler.get_service_settings()
+        self.__config_rule_composer.configure(self.__service, service_settings)
+
+        for endpoint in endpoints:
+            device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
+
+            device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+            device_settings = self.__settings_handler.get_device_settings(device_obj)
+            _device = self.__config_rule_composer.get_device(device_obj.name)
+            _device.configure(device_obj, device_settings)
+
+            endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
+            endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+            _endpoint = _device.get_endpoint(endpoint_obj.name)
+            _endpoint.configure(endpoint_obj, endpoint_settings)
+
+            self.__endpoint_map[(device_uuid, endpoint_uuid)] = (device_obj.name, endpoint_obj.name)
+
+        self.__static_route_generator.compose(endpoints)
+
+    def _do_configurations(
+        self, config_rules_per_device : Dict[str, List[Dict]], endpoints : List[Tuple[str, str, Optional[str]]],
+        delete : bool = False
+    ) -> List[Union[bool, Exception]]:
+        # Configuration is done atomically on each device, all OK / all KO per device
+        results_per_device = dict()
+        for device_name,json_config_rules in config_rules_per_device.items():
+            try:
+                device_obj = self.__config_rule_composer.get_device(device_name).objekt
+                if len(json_config_rules) == 0: continue
+                del device_obj.device_config.config_rules[:]
+                for json_config_rule in json_config_rules:
+                    device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                self.__task_executor.configure_device(device_obj)
+                results_per_device[device_name] = True
+            except Exception as e: # pylint: disable=broad-exception-caught
+                verb = 'deconfigure' if delete else 'configure'
+                MSG = 'Unable to {:s} Device({:s}) : ConfigRules({:s})'
+                LOGGER.exception(MSG.format(verb, str(device_name), str(json_config_rules)))
+                results_per_device[device_name] = e
+
+        results = []
+        for endpoint in endpoints:
+            device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
+            device_name, _ = self.__endpoint_map[(device_uuid, endpoint_uuid)]
+            if device_name not in results_per_device: continue
+            results.append(results_per_device[device_name])
+        return results
+
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+        self._compose_config_rules(endpoints)
+        config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=False)
+        LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
+        results = self._do_configurations(config_rules_per_device, endpoints)
+        LOGGER.debug('results={:s}'.format(str(results)))
+        return results
+
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+        self._compose_config_rules(endpoints)
+        config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=True)
+        LOGGER.debug('config_rules_per_device={:s}'.format(str(config_rules_per_device)))
+        results = self._do_configurations(config_rules_per_device, endpoints, delete=True)
+        LOGGER.debug('results={:s}'.format(str(results)))
+        return results
+
+    def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                resource_value = json.loads(resource[1])
+                self.__settings_handler.set(resource[0], resource_value)
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
+
+    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                self.__settings_handler.delete(resource[0]) ; results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
diff --git a/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockTaskExecutor.py b/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockTaskExecutor.py
new file mode 100644
index 0000000000000000000000000000000000000000..765b04477efdf06bfef934e96329887e898aa1b4
--- /dev/null
+++ b/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockTaskExecutor.py
@@ -0,0 +1,57 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from enum import Enum
+from typing import Dict, Optional, Union
+from common.method_wrappers.ServiceExceptions import NotFoundException
+from common.proto.context_pb2 import Connection, Device, DeviceId, Service
+from service.service.tools.ObjectKeys import get_device_key
+
+LOGGER = logging.getLogger(__name__)
+
+CacheableObject = Union[Connection, Device, Service]
+
+class CacheableObjectType(Enum):
+    CONNECTION = 'connection'
+    DEVICE     = 'device'
+    SERVICE    = 'service'
+
+class MockTaskExecutor:
+    def __init__(self) -> None:
+        self._grpc_objects_cache : Dict[str, CacheableObject] = dict()
+
+    # ----- Common methods ---------------------------------------------------------------------------------------------
+
+    def _load_grpc_object(self, object_type : CacheableObjectType, object_key : str) -> Optional[CacheableObject]:
+        object_key = '{:s}:{:s}'.format(object_type.value, object_key)
+        return self._grpc_objects_cache.get(object_key)
+
+    def _store_grpc_object(self, object_type : CacheableObjectType, object_key : str, grpc_object) -> None:
+        object_key = '{:s}:{:s}'.format(object_type.value, object_key)
+        self._grpc_objects_cache[object_key] = grpc_object
+
+    def _delete_grpc_object(self, object_type : CacheableObjectType, object_key : str) -> None:
+        object_key = '{:s}:{:s}'.format(object_type.value, object_key)
+        self._grpc_objects_cache.pop(object_key, None)
+
+    def get_device(self, device_id : DeviceId) -> Device:
+        device_key = get_device_key(device_id)
+        device = self._load_grpc_object(CacheableObjectType.DEVICE, device_key)
+        if device is None: raise NotFoundException('Device', device_key)
+        return device
+
+    def configure_device(self, device : Device) -> None:
+        device_key = get_device_key(device.device_id)
+        self._store_grpc_object(CacheableObjectType.DEVICE, device_key, device)
diff --git a/src/service/tests/test_l3nm_gnmi_static_rule_gen/__init__.py b/src/service/tests/test_l3nm_gnmi_static_rule_gen/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/service/tests/test_l3nm_gnmi_static_rule_gen/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/service/tests/test_l3nm_gnmi_static_rule_gen/test_unitary.py b/src/service/tests/test_l3nm_gnmi_static_rule_gen/test_unitary.py
new file mode 100644
index 0000000000000000000000000000000000000000..43709b036b8158ddfc59453aa798fa2d303906e0
--- /dev/null
+++ b/src/service/tests/test_l3nm_gnmi_static_rule_gen/test_unitary.py
@@ -0,0 +1,147 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run with:
+# $ PYTHONPATH=./src python -m service.tests.test_l3nm_gnmi_static_rule_gen.test_unitary
+
+import logging
+from typing import List, Optional, Tuple
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import Device, DeviceOperationalStatusEnum, Service
+from common.tools.object_factory.ConfigRule import json_config_rule_set
+from common.tools.object_factory.Device import json_device, json_device_id
+from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
+from common.tools.object_factory.Service import json_service_l3nm_planned
+from .MockServiceHandler import MockServiceHandler
+from .MockTaskExecutor import CacheableObjectType, MockTaskExecutor
+
+logging.basicConfig(level=logging.DEBUG)
+LOGGER = logging.getLogger(__name__)
+
+SERVICE_DC1_DC2 = Service(**json_service_l3nm_planned(
+    'svc-dc1-dc2-uuid',
+    endpoint_ids=[
+        json_endpoint_id(json_device_id('DC1'), 'int'),
+        json_endpoint_id(json_device_id('DC2'), 'int'),
+    ],
+    config_rules=[
+        json_config_rule_set('/device[DC1]/endpoint[eth0]/settings', {
+            'ipv4_address': '192.168.10.10', 'ipv4_prefix_len': 24, 'sub_interface_index': 0
+        }),
+        json_config_rule_set('/device[R1]/endpoint[1/2]/settings', {
+            'ipv4_address': '10.0.1.1', 'ipv4_prefix_len': 24, 'sub_interface_index': 0
+        }),
+        #json_config_rule_set('/device[R2]/endpoint[1/2]/settings', {
+        #    'ipv4_address': '10.0.2.1', 'ipv4_prefix_len': 24, 'sub_interface_index': 0
+        #}),
+        json_config_rule_set('/device[DC2]/endpoint[eth0]/settings', {
+            'ipv4_address': '192.168.20.10', 'ipv4_prefix_len': 24, 'sub_interface_index': 0
+        }),
+    ]
+))
+
+SERVICE_DC1_DC3 = Service(**json_service_l3nm_planned(
+    'svc-dc1-dc3-uuid',
+    endpoint_ids=[
+        json_endpoint_id(json_device_id('DC1'), 'int'),
+        json_endpoint_id(json_device_id('DC3'), 'int'),
+    ],
+    config_rules=[
+        json_config_rule_set('/device[DC1]/endpoint[eth0]/settings', {
+            'ipv4_address': '192.168.10.10', 'ipv4_prefix_len': 24, 'sub_interface_index': 0
+        }),
+        #json_config_rule_set('/device[R1]/endpoint[1/2]/settings', {
+        #    'ipv4_address': '10.0.1.1', 'ipv4_prefix_len': 24, 'sub_interface_index': 0
+        #}),
+        json_config_rule_set('/device[R4]/endpoint[1/1]/settings', {
+            'ipv4_address': '10.0.4.1', 'ipv4_prefix_len': 24, 'sub_interface_index': 0
+        }),
+        json_config_rule_set('/device[DC3]/endpoint[eth0]/settings', {
+            'ipv4_address': '192.168.30.10', 'ipv4_prefix_len': 24, 'sub_interface_index': 0
+        }),
+    ]
+))
+
+CONNECTION_ENDPOINTS_DC1_DC2 : List[Tuple[str, str, Optional[str]]] = [
+    ('DC1', 'int',  None), ('DC1', 'eth0', None),
+    ('R1',  '1/1',  None), ('R1',  '1/2',  None),
+    ('R2',  '1/1',  None), ('R2',  '1/2',  None),
+    ('R3',  '1/1',  None), ('R3',  '1/2',  None),
+    ('DC2', 'eth0', None), ('DC2', 'int',  None),
+]
+
+CONNECTION_ENDPOINTS_DC1_DC3 : List[Tuple[str, str, Optional[str]]] = [
+    ('DC1', 'int',  None), ('DC1', 'eth0', None),
+    ('R1',  '1/1',  None), ('R1',  '1/2',  None),
+    ('R2',  '1/1',  None), ('R2',  '1/3',  None),
+    ('R4',  '1/1',  None), ('R4',  '1/2',  None),
+    ('DC3', 'eth0', None), ('DC3', 'int',  None),
+]
+
+def test_l3nm_gnmi_static_rule_gen() -> None:
+    dev_op_st_enabled = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+
+    mock_task_executor = MockTaskExecutor()
+    mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'DC1', Device(**json_device(
+        'uuid-DC1', DeviceTypeEnum.EMULATED_DATACENTER.value, dev_op_st_enabled, name='DC1', endpoints=[
+            json_endpoint(json_device_id('uuid-DC1'), 'uuid-int',  'packet', name='int' ),
+            json_endpoint(json_device_id('uuid-DC1'), 'uuid-eth0', 'packet', name='eth0'),
+        ]
+    )))
+    mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'DC2', Device(**json_device(
+        'uuid-DC2', DeviceTypeEnum.EMULATED_DATACENTER.value, dev_op_st_enabled, name='DC2', endpoints=[
+            json_endpoint(json_device_id('uuid-DC2'), 'uuid-int',  'packet', name='int' ),
+            json_endpoint(json_device_id('uuid-DC2'), 'uuid-eth0', 'packet', name='eth0'),
+        ]
+    )))
+    mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'DC3', Device(**json_device(
+        'uuid-DC3', DeviceTypeEnum.EMULATED_DATACENTER.value, dev_op_st_enabled, name='DC3', endpoints=[
+            json_endpoint(json_device_id('uuid-DC3'), 'uuid-int',  'packet', name='int' ),
+            json_endpoint(json_device_id('uuid-DC3'), 'uuid-eth0', 'packet', name='eth0'),
+        ]
+    )))
+    mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'R1', Device(**json_device(
+        'uuid-R1', DeviceTypeEnum.EMULATED_PACKET_ROUTER.value, dev_op_st_enabled, name='R1', endpoints=[
+            json_endpoint(json_device_id('uuid-R1'), 'uuid-1/1', 'packet', name='1/1'),
+            json_endpoint(json_device_id('uuid-R1'), 'uuid-1/2', 'packet', name='1/2'),
+        ]
+    )))
+    mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'R2', Device(**json_device(
+        'uuid-R2', DeviceTypeEnum.EMULATED_PACKET_ROUTER.value, dev_op_st_enabled, name='R2', endpoints=[
+            json_endpoint(json_device_id('uuid-R2'), 'uuid-1/1', 'packet', name='1/1'),
+            json_endpoint(json_device_id('uuid-R2'), 'uuid-1/2', 'packet', name='1/2'),
+            json_endpoint(json_device_id('uuid-R2'), 'uuid-1/3', 'packet', name='1/3'),
+        ]
+    )))
+    mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'R3', Device(**json_device(
+        'uuid-R3', DeviceTypeEnum.EMULATED_PACKET_ROUTER.value, dev_op_st_enabled, name='R3', endpoints=[
+            json_endpoint(json_device_id('uuid-R3'), 'uuid-1/1', 'packet', name='1/1'),
+            json_endpoint(json_device_id('uuid-R3'), 'uuid-1/2', 'packet', name='1/2'),
+        ]
+    )))
+    mock_task_executor._store_grpc_object(CacheableObjectType.DEVICE, 'R4', Device(**json_device(
+        'uuid-R4', DeviceTypeEnum.EMULATED_PACKET_ROUTER.value, dev_op_st_enabled, name='R4', endpoints=[
+            json_endpoint(json_device_id('uuid-R4'), 'uuid-1/1', 'packet', name='1/1'),
+            json_endpoint(json_device_id('uuid-R4'), 'uuid-1/2', 'packet', name='1/2'),
+        ]
+    )))
+
+    mock_service_handler = MockServiceHandler(SERVICE_DC1_DC2, mock_task_executor)
+    mock_service_handler.SetEndpoint(CONNECTION_ENDPOINTS_DC1_DC2)
+
+    mock_service_handler = MockServiceHandler(SERVICE_DC1_DC3, mock_task_executor)
+    mock_service_handler.SetEndpoint(CONNECTION_ENDPOINTS_DC1_DC3)
+
+if __name__ == '__main__':
+    test_l3nm_gnmi_static_rule_gen()
diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml
index 808aebf2173361e05fee5ed2806b9c7aa6362753..b7da988bf33175e149906243f28b0df0d78d9d8b 100644
--- a/src/tests/.gitlab-ci.yml
+++ b/src/tests/.gitlab-ci.yml
@@ -20,4 +20,5 @@ include:
   #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml'
   #- local: '/src/tests/ofc23/.gitlab-ci.yml'
   - local: '/src/tests/ofc24/.gitlab-ci.yml'
+  - local: '/src/tests/eucnc24/.gitlab-ci.yml'
   #- local: '/src/tests/ecoc24/.gitlab-ci.yml'
diff --git a/src/tests/eucnc24/.gitignore b/src/tests/eucnc24/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..24a4b233365e23a9462f4b64e8b60fef6a62bee4
--- /dev/null
+++ b/src/tests/eucnc24/.gitignore
@@ -0,0 +1,5 @@
+clab-*/
+images/
+*.clab.yml.bak
+*.tar
+*.tar.gz
diff --git a/src/tests/eucnc24/.gitlab-ci.yml b/src/tests/eucnc24/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5dec5e36cf2a2984fcbb073190fb386e16a4e8c3
--- /dev/null
+++ b/src/tests/eucnc24/.gitlab-ci.yml
@@ -0,0 +1,201 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build eucnc24:
+  variables:
+    TEST_NAME: 'eucnc24'
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker buildx build -t "${TEST_NAME}:latest" -f ./src/tests/${TEST_NAME}/Dockerfile .
+    - docker tag "${TEST_NAME}:latest" "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
+    - docker push "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
+  after_script:
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/tests/${TEST_NAME}/**/*.{py,in,sh,yml}
+      - src/tests/${TEST_NAME}/Dockerfile
+      - .gitlab-ci.yml
+
+# Deploy TeraFlowSDN and Execute end-2-end test
+end2end_test eucnc24:
+  variables:
+    TEST_NAME: 'eucnc24'
+  stage: end2end_test
+  # Disable to force running it after all other tasks
+  #needs:
+  #  - build eucnc24
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - docker rm -f ${TEST_NAME} || true
+    - sudo containerlab destroy --all --cleanup || true
+
+  script:
+    # Download Docker image to run the test
+    - docker pull "${CI_REGISTRY_IMAGE}/${TEST_NAME}:latest"
+
+    # Check MicroK8s is ready
+    - microk8s status --wait-ready
+    - kubectl get pods --all-namespaces
+
+    # Deploy ContainerLab Scenario
+    - RUNNER_PATH=`pwd`
+    #- cd $PWD/src/tests/${TEST_NAME}
+    - mkdir -p /tmp/clab/${TEST_NAME}
+    - cp -R src/tests/${TEST_NAME}/clab/* /tmp/clab/${TEST_NAME}
+    - tree -la /tmp/clab/${TEST_NAME}
+    - cd /tmp/clab/${TEST_NAME}
+    - sudo containerlab deploy --reconfigure --topo eucnc24.clab.yml
+    - cd $RUNNER_PATH
+
+    # Wait for initialization of Device NOSes
+    - sleep 3
+    - docker ps -a
+
+    # Configure TeraFlowSDN deployment
+    # Uncomment if DEBUG log level is needed for the components
+    #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml
+    #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml
+    #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml
+    #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml
+    #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml
+    #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/monitoringservice.yaml
+
+    - source src/tests/${TEST_NAME}/deploy_specs.sh
+    #- export TFS_REGISTRY_IMAGES="${CI_REGISTRY_IMAGE}"
+    #- export TFS_SKIP_BUILD="YES"
+    #- export TFS_IMAGE_TAG="latest"
+    #- echo "TFS_REGISTRY_IMAGES=${CI_REGISTRY_IMAGE}"
+
+    # Deploy TeraFlowSDN
+    - ./deploy/crdb.sh
+    - ./deploy/nats.sh
+    - ./deploy/qdb.sh
+    - ./deploy/kafka.sh
+    - ./deploy/tfs.sh
+    - ./deploy/show.sh
+
+    ## Wait for Context to be subscribed to NATS
+    ## WARNING: this loop is infinite if there is no subscriber (such as monitoring).
+    ##          Investigate if we can use a counter to limit the number of iterations.
+    ##          For now, keep it commented out.
+    #- LOOP_MAX_ATTEMPTS=180
+    #- LOOP_COUNTER=0
+    #- >
+    #  while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do
+    #    echo "Attempt: $LOOP_COUNTER"
+    #    kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1;
+    #    sleep 1;
+    #    LOOP_COUNTER=$((LOOP_COUNTER + 1))
+    #    if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then
+    #      echo "Max attempts reached, exiting the loop."
+    #      break
+    #    fi
+    #  done
+    - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server
+
+    # Run end-to-end test: onboard scenario
+    - >
+      docker run -t --rm --name ${TEST_NAME} --network=host 
+      --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
+      --volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
+      $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-onboarding.sh
+
+    # Run end-to-end test: configure service TFS
+    - >
+      docker run -t --rm --name ${TEST_NAME} --network=host 
+      --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
+      --volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
+      $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh
+
+    # Run end-to-end test: test connectivity with ping
+    - sudo containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 192.168.1.10'
+    - sudo containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 192.168.1.1'
+    - sudo containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 192.168.2.1'
+    - sudo containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 192.168.2.10'
+    - sudo containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 192.168.3.10'
+
+    # Run end-to-end test: deconfigure service TFS
+    - >
+      docker run -t --rm --name ${TEST_NAME} --network=host 
+      --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
+      --volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
+      $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh
+
+    # Run end-to-end test: configure service IETF
+    - >
+      docker run -t --rm --name ${TEST_NAME} --network=host 
+      --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
+      --volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
+      $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-create.sh
+
+    # Run end-to-end test: test connectivity with ping
+    - sudo containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 192.168.1.10'
+    - sudo containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 192.168.1.1'
+    - sudo containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 192.168.2.1'
+    - sudo containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 192.168.2.10'
+    - sudo containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 192.168.3.10'
+
+    # Run end-to-end test: deconfigure service IETF
+    - >
+      docker run -t --rm --name ${TEST_NAME} --network=host 
+      --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
+      --volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
+      $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-remove.sh
+
+    # Run end-to-end test: cleanup scenario
+    - >
+      docker run -t --rm --name ${TEST_NAME} --network=host 
+      --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
+      --volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
+      $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-cleanup.sh
+
+  after_script:
+    # Dump TeraFlowSDN component logs
+    - source src/tests/${TEST_NAME}/deploy_specs.sh
+    - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server
+    - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/deviceservice -c server
+    - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/pathcompservice -c frontend
+    - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/serviceservice -c server
+    - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/nbiservice -c server
+    #- kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice -c server
+
+    # Destroy Scenario
+    - docker rm -f ${TEST_NAME} || true
+    - RUNNER_PATH=`pwd`
+    #- cd $PWD/src/tests/${TEST_NAME}
+    - cd /tmp/clab/${TEST_NAME}
+    - sudo containerlab destroy --topo eucnc24.clab.yml --cleanup || true
+    - sudo rm -rf clab-eucnc24/ .eucnc24.clab.yml.bak || true
+    - cd $RUNNER_PATH
+    - kubectl delete namespaces tfs || true
+
+    # Clean old docker images
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+
+  #coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+  artifacts:
+      when: always
+      reports:
+        junit: ./src/tests/${TEST_NAME}/report_*.xml
diff --git a/src/tests/eucnc24/Dockerfile b/src/tests/eucnc24/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..23a38bace79880f37fe7957566f7d84030fa7fd8
--- /dev/null
+++ b/src/tests/eucnc24/Dockerfile
@@ -0,0 +1,84 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ git && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/tests/eucnc24
+WORKDIR /var/teraflow/tests/eucnc24
+COPY src/tests/eucnc24/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/__init__.py ./__init__.py
+COPY src/common/*.py ./common/
+COPY src/common/tests/. ./common/tests/
+COPY src/common/tools/. ./common/tools/
+COPY src/context/__init__.py context/__init__.py
+COPY src/context/client/. context/client/
+COPY src/device/__init__.py device/__init__.py
+COPY src/device/client/. device/client/
+COPY src/monitoring/__init__.py monitoring/__init__.py
+COPY src/monitoring/client/. monitoring/client/
+COPY src/service/__init__.py service/__init__.py
+COPY src/service/client/. service/client/
+COPY src/slice/__init__.py slice/__init__.py
+COPY src/slice/client/. slice/client/
+COPY src/tests/*.py ./tests/
+COPY src/tests/eucnc24/__init__.py ./tests/eucnc24/__init__.py
+COPY src/tests/eucnc24/data/. ./tests/eucnc24/data/
+COPY src/tests/eucnc24/tests/. ./tests/eucnc24/tests/
+COPY src/tests/eucnc24/scripts/. ./
+
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install tree && \
+    rm -rf /var/lib/apt/lists/*
+
+RUN tree -la /var/teraflow
diff --git a/src/tests/eucnc24/README.md b/src/tests/eucnc24/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f8c2f9d49793b8e0751ea4ea09bf893e7ceae0b6
--- /dev/null
+++ b/src/tests/eucnc24/README.md
@@ -0,0 +1,123 @@
+# DataPlane-in-a-Box - Control an Emulated DataPlane through TeraFlowSDN
+
+## Emulated DataPlane Deployment
+- ContainerLab
+- Scenario
+- Descriptor
+
+## TeraFlowSDN Deployment
+```bash
+cd ~/tfs-ctrl
+source src/tests/eucnc24/deploy_specs.sh
+./deploy/all.sh
+```
+
+# ContainerLab - Arista cEOS - Commands
+
+## Download and install ContainerLab
+```bash
+sudo bash -c "$(curl -sL https://get.containerlab.dev)" -- -v 0.48.6
+```
+
+## Download Arista cEOS image and create Docker image
+```bash
+cd ~/tfs-ctrl/dataplane-in-a-box
+docker import arista/cEOS64-lab-4.31.2F.tar ceos:4.31.2F
+```
+
+## Deploy scenario
+```bash
+cd ~/tfs-ctrl/dataplane-in-a-box
+sudo containerlab deploy --topo arista.clab.yml
+```
+
+## Inspect scenario
+```bash
+cd ~/tfs-ctrl/dataplane-in-a-box
+sudo containerlab inspect --topo arista.clab.yml
+```
+
+## Destroy scenario
+```bash
+cd ~/tfs-ctrl/dataplane-in-a-box
+sudo containerlab destroy --topo arista.clab.yml
+sudo rm -rf clab-arista/ .arista.clab.yml.bak
+```
+
+## Access cEOS Bash
+```bash
+docker exec -it clab-arista-r1 bash
+```
+
+## Access cEOS CLI
+```bash
+docker exec -it clab-arista-r1 Cli
+docker exec -it clab-arista-r2 Cli
+```
+
+## Configure ContainerLab clients
+```bash
+docker exec -it clab-arista-client1 bash
+    ip address add 192.168.1.10/24 dev eth1
+    ip route add 192.168.2.0/24 via 192.168.1.1
+    ip route add 192.168.3.0/24 via 192.168.1.1
+    ping 192.168.2.10
+    ping 192.168.3.10
+
+docker exec -it clab-arista-client2 bash
+    ip address add 192.168.2.10/24 dev eth1
+    ip route add 192.168.1.0/24 via 192.168.2.1
+    ip route add 192.168.3.0/24 via 192.168.2.1
+    ping 192.168.1.10
+    ping 192.168.3.10
+
+docker exec -it clab-arista-client3 bash
+    ip address add 192.168.3.10/24 dev eth1
+    ip route add 192.168.1.0/24 via 192.168.3.1
+    ip route add 192.168.2.0/24 via 192.168.3.1
+    ping 192.168.1.10
+    ping 192.168.2.10
+```
+
+## Install gNMIc
+```bash
+sudo bash -c "$(curl -sL https://get-gnmic.kmrd.dev)"
+```
+
+## gNMI Capabilities request
+```bash
+gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure capabilities
+```
+
+## gNMI Get request
+```bash
+gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path / > wan1.json
+gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path /interfaces/interface > wan1-ifaces.json
+```
+
+## gNMI Set request
+```bash
+#gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --update-path /system/config/hostname --update-value srl11
+#gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path /system/config/hostname
+```
+
+## Subscribe request
+```bash
+gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure --encoding json_ietf subscribe --path /interfaces/interface[name=Management0]/state/
+
+# In another terminal, you can generate traffic opening SSH connection
+ssh admin@clab-arista-wan1
+```
+
+## Check configurations done
+```bash
+gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/network-instances' > wan1-nis.json
+gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/interfaces' > wan1-ifs.json
+```
+
+## Delete elements
+```bash
+gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/network-instances/network-instance[name=b19229e8]'
+gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]'
+gnmic --address clab-arista-wan1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]'
+```
+```
diff --git a/src/tests/eucnc24/__init__.py b/src/tests/eucnc24/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/tests/eucnc24/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/eucnc24/clab/eucnc24.clab.yml b/src/tests/eucnc24/clab/eucnc24.clab.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c9182ba1d405cce514d54494da37d27586fd83ac
--- /dev/null
+++ b/src/tests/eucnc24/clab/eucnc24.clab.yml
@@ -0,0 +1,70 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TFS - Arista devices + Linux clients
+
+name: eucnc24
+
+mgmt:
+  network: mgmt-net
+  ipv4-subnet: 172.20.20.0/24
+
+topology:
+  kinds:
+    arista_ceos:
+      kind: arista_ceos
+      #image: ceos:4.30.4M
+      #image: ceos:4.31.2F
+      #image: ceos:4.31.5M
+      #image: ceos:4.32.0F
+      image: ceos:4.32.2F
+      #image: ceos:4.32.2.1F
+    linux:
+      kind: linux
+      image: ghcr.io/hellt/network-multitool:latest
+
+  nodes:
+    r1:
+      kind: arista_ceos
+      mgmt-ipv4: 172.20.20.101
+
+    r2:
+      kind: arista_ceos
+      mgmt-ipv4: 172.20.20.102
+
+    r3:
+      kind: arista_ceos
+      mgmt-ipv4: 172.20.20.103
+
+    dc1:
+      kind: linux
+      mgmt-ipv4: 172.20.20.211
+      exec:
+        - ip link set address 00:c1:ab:00:01:01 dev eth1
+        - ip address add 192.168.1.10/24 dev eth1
+        - ip route add 192.168.2.0/24 via 192.168.1.1
+
+    dc2:
+      kind: linux
+      mgmt-ipv4: 172.20.20.221
+      exec:
+        - ip link set address 00:c1:ab:00:02:01 dev eth1
+        - ip address add 192.168.2.10/24 dev eth1
+        - ip route add 192.168.1.0/24 via 192.168.2.1
+
+  links:
+    - endpoints: ["r1:eth2", "r2:eth1"]
+    - endpoints: ["r2:eth3", "r3:eth2"]
+    - endpoints: ["r1:eth10", "dc1:eth1"]
+    - endpoints: ["r3:eth10", "dc2:eth1"]
diff --git a/src/tests/eucnc24/data/ietf-l3vpn-service.json b/src/tests/eucnc24/data/ietf-l3vpn-service.json
new file mode 100644
index 0000000000000000000000000000000000000000..a6297b28f0fea94dcc8a457ad2b45d38e77aa4ea
--- /dev/null
+++ b/src/tests/eucnc24/data/ietf-l3vpn-service.json
@@ -0,0 +1,83 @@
+{
+    "ietf-l3vpn-svc:l3vpn-svc": {
+        "vpn-services": {"vpn-service": [{"vpn-id": "ietf-l3vpn-svc"}]},
+        "sites": {
+            "site": [
+                {
+                    "site-id": "site_DC1",
+                    "management": {"type": "ietf-l3vpn-svc:provider-managed"},
+                    "locations": {"location": [{"location-id": "DC1"}]},
+                    "devices": {"device": [{"device-id": "dc1", "location": "DC1"}]},
+                    "site-network-accesses": {
+                        "site-network-access": [
+                            {
+                                "site-network-access-id": "int",
+                                "site-network-access-type": "ietf-l3vpn-svc:multipoint",
+                                "device-reference": "dc1",
+                                "vpn-attachment": {"vpn-id": "ietf-l3vpn-svc", "site-role": "ietf-l3vpn-svc:spoke-role"},
+                                "ip-connection": {
+                                    "ipv4": {
+                                        "address-allocation-type": "ietf-l3vpn-svc:static-address",
+                                        "addresses": {
+                                            "provider-address": "192.168.1.1",
+                                            "customer-address": "192.168.1.10",
+                                            "prefix-length": 24
+                                        }
+                                    }
+                                },
+                                "service": {
+                                    "svc-mtu": 1500,
+                                    "svc-input-bandwidth": 1000000000,
+                                    "svc-output-bandwidth": 1000000000,
+                                    "qos": {"qos-profile": {"classes": {"class": [{
+                                        "class-id": "qos-realtime",
+                                        "direction": "ietf-l3vpn-svc:both",
+                                        "latency": {"latency-boundary": 10},
+                                        "bandwidth": {"guaranteed-bw-percent": 100}
+                                    }]}}}
+                                }
+                            }
+                        ]
+                    }
+                },
+                {
+                    "site-id": "site_DC2",
+                    "management": {"type": "ietf-l3vpn-svc:provider-managed"},
+                    "locations": {"location": [{"location-id": "DC2"}]},
+                    "devices": {"device": [{"device-id": "dc2", "location": "DC2"}]},
+                    "site-network-accesses": {
+                        "site-network-access": [
+                            {
+                                "site-network-access-id": "int",
+                                "site-network-access-type": "ietf-l3vpn-svc:multipoint",
+                                "device-reference": "dc2",
+                                "vpn-attachment": {"vpn-id": "ietf-l3vpn-svc", "site-role": "ietf-l3vpn-svc:hub-role"},
+                                "ip-connection": {
+                                    "ipv4": {
+                                        "address-allocation-type": "ietf-l3vpn-svc:static-address",
+                                        "addresses": {
+                                            "provider-address": "192.168.2.1",
+                                            "customer-address": "192.168.2.10",
+                                            "prefix-length": 24
+                                        }
+                                    }
+                                },
+                                "service": {
+                                    "svc-mtu": 1500,
+                                    "svc-input-bandwidth": 1000000000,
+                                    "svc-output-bandwidth": 1000000000,
+                                    "qos": {"qos-profile": {"classes": {"class": [{
+                                        "class-id": "qos-realtime",
+                                        "direction": "ietf-l3vpn-svc:both",
+                                        "latency": {"latency-boundary": 10},
+                                        "bandwidth": {"guaranteed-bw-percent": 100}
+                                    }]}}}
+                                }
+                            }
+                        ]
+                    }
+                }
+            ]
+        }
+    }
+}
diff --git a/src/tests/eucnc24/data/tfs-service.json b/src/tests/eucnc24/data/tfs-service.json
new file mode 100644
index 0000000000000000000000000000000000000000..397fc84789111932da047acd22c7bc787888657f
--- /dev/null
+++ b/src/tests/eucnc24/data/tfs-service.json
@@ -0,0 +1,26 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l3vpn-svc"}
+            },
+            "service_type": "SERVICETYPE_L3NM",
+            "service_status": {"service_status": "SERVICESTATUS_PLANNED"},
+            "service_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}},
+                {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}}
+            ],
+            "service_constraints": [],
+            "service_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {
+                    "resource_key": "/device[dc1]/endpoint[eth1]/settings",
+                    "resource_value": {"address_ip": "192.168.1.10", "address_prefix": 24, "index": 0}
+                }},
+                {"action": "CONFIGACTION_SET", "custom": {
+                    "resource_key": "/device[dc2]/endpoint[eth1]/settings",
+                    "resource_value": {"address_ip": "192.168.2.10", "address_prefix": 24, "index": 0}
+                }}
+            ]}
+        }
+    ]
+}
diff --git a/src/tests/eucnc24/data/tfs-topology.json b/src/tests/eucnc24/data/tfs-topology.json
new file mode 100644
index 0000000000000000000000000000000000000000..ac87af62d31e4728c12687c525233d8e840d7441
--- /dev/null
+++ b/src/tests/eucnc24/data/tfs-topology.json
@@ -0,0 +1,126 @@
+{
+    "contexts": [
+        {"context_id": {"context_uuid": {"uuid": "admin"}}}
+    ],
+    "topologies": [
+        {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "dc1"}}, "device_type": "emu-datacenter",
+            "device_drivers": ["DEVICEDRIVER_UNDEFINED"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "dc2"}}, "device_type": "emu-datacenter",
+            "device_drivers": ["DEVICEDRIVER_UNDEFINED"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "r1"}}, "device_type": "packet-router",
+            "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.101"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin", "use_tls": false
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "r2"}}, "device_type": "packet-router",
+            "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.102"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin", "use_tls": false
+                }}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "r3"}}, "device_type": "packet-router",
+            "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+            "device_config": {"config_rules": [
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.103"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+                {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+                    "username": "admin", "password": "admin", "use_tls": false
+                }}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "r1/Ethernet2==r2/Ethernet1"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}},
+                {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "r2/Ethernet1==r1/Ethernet2"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}},
+                {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}
+            ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "r2/Ethernet3==r3/Ethernet2"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet3"}},
+                {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "r3/Ethernet2==r2/Ethernet3"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet2"}},
+                {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet3"}}
+            ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "r1/Ethernet10==dc1/eth1"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}},
+                {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "dc1/eth1==r1/Ethernet10"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}
+            ]
+        },
+
+        {
+            "link_id": {"link_uuid": {"uuid": "r3/Ethernet10==dc2/eth1"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet10"}},
+                {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "dc2/eth1==r3/Ethernet10"}},
+            "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "eth1"}},
+                {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}
+            ]
+        }
+    ]
+}
diff --git a/src/tests/eucnc24/deploy-scripts/clab-cli-dc1.sh b/src/tests/eucnc24/deploy-scripts/clab-cli-dc1.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d0ee18dcfd7eac03b108e163200d14b532d8db8f
--- /dev/null
+++ b/src/tests/eucnc24/deploy-scripts/clab-cli-dc1.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-eucnc24-dc1 bash
diff --git a/src/tests/eucnc24/deploy-scripts/clab-cli-dc2.sh b/src/tests/eucnc24/deploy-scripts/clab-cli-dc2.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2867fedcb9051f6d73b78d571b33ed7ae25efd80
--- /dev/null
+++ b/src/tests/eucnc24/deploy-scripts/clab-cli-dc2.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-eucnc24-dc2 bash
diff --git a/src/tests/eucnc24/deploy-scripts/clab-cli-r1.sh b/src/tests/eucnc24/deploy-scripts/clab-cli-r1.sh
new file mode 100755
index 0000000000000000000000000000000000000000..69141a0ae73ee23274b823242b0f864e1527d505
--- /dev/null
+++ b/src/tests/eucnc24/deploy-scripts/clab-cli-r1.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-eucnc24-r1 Cli
diff --git a/src/tests/eucnc24/deploy-scripts/clab-cli-r2.sh b/src/tests/eucnc24/deploy-scripts/clab-cli-r2.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7860d1d21f07a293f3bcbc65575625568a49a41c
--- /dev/null
+++ b/src/tests/eucnc24/deploy-scripts/clab-cli-r2.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-eucnc24-r2 Cli
diff --git a/src/tests/eucnc24/deploy-scripts/clab-cli-r3.sh b/src/tests/eucnc24/deploy-scripts/clab-cli-r3.sh
new file mode 100755
index 0000000000000000000000000000000000000000..801c3223d7c59e767bcc64b0cc331553a34bb4b4
--- /dev/null
+++ b/src/tests/eucnc24/deploy-scripts/clab-cli-r3.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-eucnc24-r3 Cli
diff --git a/src/tests/eucnc24/deploy-scripts/clab-deploy.sh b/src/tests/eucnc24/deploy-scripts/clab-deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ae1676ada75ab6a5ce671fae187c0a94ffc62331
--- /dev/null
+++ b/src/tests/eucnc24/deploy-scripts/clab-deploy.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/src/tests/eucnc24
+sudo containerlab deploy --topo eucnc24.clab.yml
diff --git a/src/tests/eucnc24/deploy-scripts/clab-destroy.sh b/src/tests/eucnc24/deploy-scripts/clab-destroy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6e58a3c490c196073f4cb259e11d45dd4ff2a1f8
--- /dev/null
+++ b/src/tests/eucnc24/deploy-scripts/clab-destroy.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/src/tests/eucnc24
+sudo containerlab destroy --topo eucnc24.clab.yml
+sudo rm -rf clab-eucnc24/ .eucnc24.clab.yml.bak
diff --git a/src/tests/eucnc24/deploy-scripts/clab-inspect.sh b/src/tests/eucnc24/deploy-scripts/clab-inspect.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0dd6dce12dc60e775edc6ab449d1d448f37bd686
--- /dev/null
+++ b/src/tests/eucnc24/deploy-scripts/clab-inspect.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/src/tests/eucnc24
+sudo containerlab inspect --topo eucnc24.clab.yml
diff --git a/src/tests/eucnc24/deploy_specs.sh b/src/tests/eucnc24/deploy_specs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..aa72575a97f7b73ce916448307c3e1773fac57c4
--- /dev/null
+++ b/src/tests/eucnc24/deploy_specs.sh
@@ -0,0 +1,211 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+#export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service nbi"
+
+# Uncomment to activate Monitoring (old)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Monitoring Framework (new)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation"
+
+# Uncomment to activate QoS Profiles
+#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile"
+
+# Uncomment to activate BGP-LS Speaker
+#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
+
+# Uncomment to activate Optical Controller
+#   To manage optical connections, "service" requires "opticalcontroller" to be deployed
+#   before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
+#   "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it.
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+#    BEFORE="${TFS_COMPONENTS% service*}"
+#    AFTER="${TFS_COMPONENTS#* service}"
+#    export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}"
+#fi
+
+# Uncomment to activate ZTP
+#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
+
+# Uncomment to activate Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
+
+# Uncomment to activate Forecaster
+#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster"
+
+# Uncomment to activate E2E Orchestrator
+#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator"
+
+# Uncomment to activate DLT and Interdomain
+#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt"
+#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then
+#    export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk"
+#    export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem"
+#    export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt"
+#fi
+
+# Uncomment to activate QKD App
+#   To manage QKD Apps, "service" requires "qkd_app" to be deployed
+#   before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
+#   "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it.
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+#    BEFORE="${TFS_COMPONENTS% service*}"
+#    AFTER="${TFS_COMPONENTS#* service}"
+#    export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
+#fi
+
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping the database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4222"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8222"
+
+# Set NATS installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/nats.sh for additional details
+export NATS_DEPLOY_MODE="single"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8812"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9009"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9000"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping the tables, if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
+
+
+# ----- Apache Kafka -----------------------------------------------------------
+
+# Set the namespace where Apache Kafka will be deployed.
+export KFK_NAMESPACE="kafka"
+
+# Set the port Apache Kafka server will be exposed to.
+export KFK_SERVER_PORT="9092"
+
+# Set the flag to YES for redeploying Apache Kafka.
+export KFK_REDEPLOY=""
diff --git a/src/tests/eucnc24/redeploy-tfs.sh b/src/tests/eucnc24/redeploy-tfs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b5ced0d12e026be4b9cb7eefcf343445b776f042
--- /dev/null
+++ b/src/tests/eucnc24/redeploy-tfs.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source ~/tfs-ctrl/src/tests/eucnc24/deploy_specs.sh
+./deploy/all.sh
diff --git a/src/tests/eucnc24/requirements.in b/src/tests/eucnc24/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..468af1a17931d6e545647e3e7a057433d74826b3
--- /dev/null
+++ b/src/tests/eucnc24/requirements.in
@@ -0,0 +1,15 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+requests==2.27.*
diff --git a/src/tests/eucnc24/scripts/run-cleanup.sh b/src/tests/eucnc24/scripts/run-cleanup.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3f697711a48f8d7fdf9fbd84d8100013ca349272
--- /dev/null
+++ b/src/tests/eucnc24/scripts/run-cleanup.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source /var/teraflow/tfs_runtime_env_vars.sh
+export PYTHONPATH=/var/teraflow
+pytest --verbose --log-level=INFO \
+    --junitxml=/opt/results/report_cleanup.xml \
+    /var/teraflow/tests/eucnc24/tests/test_cleanup.py
diff --git a/src/tests/eucnc24/scripts/run-onboarding.sh b/src/tests/eucnc24/scripts/run-onboarding.sh
new file mode 100755
index 0000000000000000000000000000000000000000..57fc435e417e02edca08ff92d5db03bf9332c0e0
--- /dev/null
+++ b/src/tests/eucnc24/scripts/run-onboarding.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source /var/teraflow/tfs_runtime_env_vars.sh
+export PYTHONPATH=/var/teraflow
+pytest --verbose --log-level=INFO \
+    --junitxml=/opt/results/report_onboarding.xml \
+    /var/teraflow/tests/eucnc24/tests/test_onboarding.py
diff --git a/src/tests/eucnc24/scripts/run-service-ietf-create.sh b/src/tests/eucnc24/scripts/run-service-ietf-create.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d2ec9eef51a1dbe311ddbc052d305ef0d8c92085
--- /dev/null
+++ b/src/tests/eucnc24/scripts/run-service-ietf-create.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source /var/teraflow/tfs_runtime_env_vars.sh
+export PYTHONPATH=/var/teraflow
+pytest --verbose --log-level=INFO \
+    --junitxml=/opt/results/report_service_ietf_create.xml \
+    /var/teraflow/tests/eucnc24/tests/test_service_ietf_create.py
diff --git a/src/tests/eucnc24/scripts/run-service-ietf-remove.sh b/src/tests/eucnc24/scripts/run-service-ietf-remove.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8c52636001fb8163b3a94fa0b229c7ab1264ab13
--- /dev/null
+++ b/src/tests/eucnc24/scripts/run-service-ietf-remove.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source /var/teraflow/tfs_runtime_env_vars.sh
+export PYTHONPATH=/var/teraflow
+pytest --verbose --log-level=INFO \
+    --junitxml=/opt/results/report_service_ietf_remove.xml \
+    /var/teraflow/tests/eucnc24/tests/test_service_ietf_remove.py
diff --git a/src/tests/eucnc24/scripts/run-service-tfs-create.sh b/src/tests/eucnc24/scripts/run-service-tfs-create.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5395f38b3ab82a11361a066f2d80b750e636b010
--- /dev/null
+++ b/src/tests/eucnc24/scripts/run-service-tfs-create.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source /var/teraflow/tfs_runtime_env_vars.sh
+export PYTHONPATH=/var/teraflow
+pytest --verbose --log-level=INFO \
+    --junitxml=/opt/results/report_service_tfs_create.xml \
+    /var/teraflow/tests/eucnc24/tests/test_service_tfs_create.py
diff --git a/src/tests/eucnc24/scripts/run-service-tfs-remove.sh b/src/tests/eucnc24/scripts/run-service-tfs-remove.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d82d6d7738c24039fc4b8e878f48bb3c0c90f3fe
--- /dev/null
+++ b/src/tests/eucnc24/scripts/run-service-tfs-remove.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source /var/teraflow/tfs_runtime_env_vars.sh
+export PYTHONPATH=/var/teraflow
+pytest --verbose --log-level=INFO \
+    --junitxml=/opt/results/report_service_tfs_remove.xml \
+    /var/teraflow/tests/eucnc24/tests/test_service_tfs_remove.py
diff --git a/src/tests/eucnc24/tests/Fixtures.py b/src/tests/eucnc24/tests/Fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9d2ceb24488dbd5b3775c958358df419dec4172
--- /dev/null
+++ b/src/tests/eucnc24/tests/Fixtures.py
@@ -0,0 +1,43 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from monitoring.client.MonitoringClient import MonitoringClient
+from service.client.ServiceClient import ServiceClient
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def monitoring_client():
+    _client = MonitoringClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def service_client():
+    _client = ServiceClient()
+    yield _client
+    _client.close()
diff --git a/src/tests/eucnc24/tests/Tools.py b/src/tests/eucnc24/tests/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..e983ffefff3d5aea6b078efa9bb586202957a5e1
--- /dev/null
+++ b/src/tests/eucnc24/tests/Tools.py
@@ -0,0 +1,109 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum, logging, requests
+from typing import Any, Dict, List, Optional, Set, Union
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_http
+
+NBI_ADDRESS  = get_service_host(ServiceNameEnum.NBI)
+NBI_PORT     = get_service_port_http(ServiceNameEnum.NBI)
+NBI_USERNAME = 'admin'
+NBI_PASSWORD = 'admin'
+NBI_BASE_URL = ''
+
+class RestRequestMethod(enum.Enum):
+    GET    = 'get'
+    POST   = 'post'
+    PUT    = 'put'
+    PATCH  = 'patch'
+    DELETE = 'delete'
+
+EXPECTED_STATUS_CODES : Set[int] = {
+    requests.codes['OK'        ],
+    requests.codes['CREATED'   ],
+    requests.codes['ACCEPTED'  ],
+    requests.codes['NO_CONTENT'],
+}
+
+def do_rest_request(
+    method : RestRequestMethod, url : str, body : Optional[Any] = None, timeout : int = 10,
+    allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
+    logger : Optional[logging.Logger] = None
+) -> Optional[Union[Dict, List]]:
+    request_url = 'http://{:s}:{:s}@{:s}:{:d}{:s}{:s}'.format(
+        NBI_USERNAME, NBI_PASSWORD, NBI_ADDRESS, NBI_PORT, str(NBI_BASE_URL), url
+    )
+
+    if logger is not None:
+        msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url))
+        if body is not None: msg += ' body={:s}'.format(str(body))
+        logger.warning(msg)
+    reply = requests.request(method.value, request_url, timeout=timeout, json=body, allow_redirects=allow_redirects)
+    if logger is not None:
+        logger.warning('Reply: {:s}'.format(str(reply.text)))
+    assert reply.status_code in expected_status_codes, 'Reply failed with status code {:d}'.format(reply.status_code)
+
+    if reply.content and len(reply.content) > 0: return reply.json()
+    return None
+
+def do_rest_get_request(
+    url : str, body : Optional[Any] = None, timeout : int = 10,
+    allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
+    logger : Optional[logging.Logger] = None
+) -> Optional[Union[Dict, List]]:
+    return do_rest_request(
+        RestRequestMethod.GET, url, body=body, timeout=timeout, allow_redirects=allow_redirects,
+        expected_status_codes=expected_status_codes, logger=logger
+    )
+
+def do_rest_post_request(
+    url : str, body : Optional[Any] = None, timeout : int = 10,
+    allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
+    logger : Optional[logging.Logger] = None
+) -> Optional[Union[Dict, List]]:
+    return do_rest_request(
+        RestRequestMethod.POST, url, body=body, timeout=timeout, allow_redirects=allow_redirects,
+        expected_status_codes=expected_status_codes, logger=logger
+    )
+
+def do_rest_put_request(
+    url : str, body : Optional[Any] = None, timeout : int = 10,
+    allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
+    logger : Optional[logging.Logger] = None
+) -> Optional[Union[Dict, List]]:
+    return do_rest_request(
+        RestRequestMethod.PUT, url, body=body, timeout=timeout, allow_redirects=allow_redirects,
+        expected_status_codes=expected_status_codes, logger=logger
+    )
+
+def do_rest_patch_request(
+    url : str, body : Optional[Any] = None, timeout : int = 10,
+    allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
+    logger : Optional[logging.Logger] = None
+) -> Optional[Union[Dict, List]]:
+    return do_rest_request(
+        RestRequestMethod.PATCH, url, body=body, timeout=timeout, allow_redirects=allow_redirects,
+        expected_status_codes=expected_status_codes, logger=logger
+    )
+
+def do_rest_delete_request(
+    url : str, body : Optional[Any] = None, timeout : int = 10,
+    allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
+    logger : Optional[logging.Logger] = None
+) -> Optional[Union[Dict, List]]:
+    return do_rest_request(
+        RestRequestMethod.DELETE, url, body=body, timeout=timeout, allow_redirects=allow_redirects,
+        expected_status_codes=expected_status_codes, logger=logger
+    )
diff --git a/src/tests/eucnc24/tests/__init__.py b/src/tests/eucnc24/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3
--- /dev/null
+++ b/src/tests/eucnc24/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/eucnc24/tests/test_cleanup.py b/src/tests/eucnc24/tests/test_cleanup.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cfed0caf295af9dd5f66fd263339e37a7c887a0
--- /dev/null
+++ b/src/tests/eucnc24/tests/test_cleanup.py
@@ -0,0 +1,44 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId
+from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from .Fixtures import context_client, device_client    # pylint: disable=unused-import
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json')
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+def test_scenario_cleanup(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,   # pylint: disable=redefined-outer-name
+) -> None:
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    # Load descriptors and validate the base scenario
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+    descriptor_loader.validate()
+    descriptor_loader.unload()
+    validate_empty_scenario(context_client)
diff --git a/src/tests/eucnc24/tests/test_onboarding.py b/src/tests/eucnc24/tests/test_onboarding.py
new file mode 100644
index 0000000000000000000000000000000000000000..93f040877a4d0c40e2a9d832872e78c75688cffb
--- /dev/null
+++ b/src/tests/eucnc24/tests/test_onboarding.py
@@ -0,0 +1,67 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os, time
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from .Fixtures import context_client, device_client # pylint: disable=unused-import
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json')
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+def test_scenario_onboarding(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,   # pylint: disable=redefined-outer-name
+) -> None:
+    validate_empty_scenario(context_client)
+
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+    descriptor_loader.validate()
+
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+def test_scenario_devices_enabled(
+    context_client : ContextClient,         # pylint: disable=redefined-outer-name
+) -> None:
+    """
+    This test validates that the devices are enabled.
+    """
+    DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+
+    num_devices = -1
+    num_devices_enabled, num_retry = 0, 0
+    while (num_devices != num_devices_enabled) and (num_retry < 10):
+        time.sleep(1.0)
+        response = context_client.ListDevices(Empty())
+        num_devices = len(response.devices)
+        num_devices_enabled = 0
+        for device in response.devices:
+            if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue
+            num_devices_enabled += 1
+        LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices))
+        num_retry += 1
+    assert num_devices_enabled == num_devices
diff --git a/src/tests/eucnc24/tests/test_service_ietf_create.py b/src/tests/eucnc24/tests/test_service_ietf_create.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f713d45be7e24e3fcaf9e5c3f6f338a4db73950
--- /dev/null
+++ b/src/tests/eucnc24/tests/test_service_ietf_create.py
@@ -0,0 +1,73 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging, os
+from typing import Dict
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from .Fixtures import context_client, storage   # pylint: disable=unused-import
+from .Tools import do_rest_get_request, do_rest_post_request
+
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l3vpn-service.json')
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+
+# pylint: disable=redefined-outer-name, unused-argument
+def test_service_ietf_creation(
+    context_client : ContextClient,
+    storage : Dict
+):
+    # Issue service creation request
+    with open(REQUEST_FILE, 'r', encoding='UTF-8') as f:
+        svc1_data = json.load(f)
+    URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services'
+    do_rest_post_request(URL, body=svc1_data, logger=LOGGER, expected_status_codes={201})
+    vpn_id = svc1_data['ietf-l3vpn-svc:l3vpn-svc']['vpn-services']['vpn-service'][0]['vpn-id']
+
+    URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service={:s}/'.format(vpn_id)
+    service_data = do_rest_get_request(URL, logger=LOGGER, expected_status_codes={200})
+    service_uuid = service_data['service-id']
+    storage['svc-uuid'] = service_uuid
+
+    # Verify service was created
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 1
+    assert len(response.slice_ids) == 0
+
+    # Check there is 1 service
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Services[{:d}] = {:s}'.format(
+        len(response.services), grpc_message_to_json_string(response)
+    ))
+    assert len(response.services) == 1
+
+    for service in response.services:
+        service_id = service.service_id
+        assert service_id.service_uuid.uuid == service_uuid
+        assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+        assert service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+
+        response = context_client.ListConnections(service_id)
+        LOGGER.warning('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+            grpc_message_to_json_string(service_id), len(response.connections),
+            grpc_message_to_json_string(response)
+        ))
+        assert len(response.connections) == 1
diff --git a/src/tests/eucnc24/tests/test_service_ietf_remove.py b/src/tests/eucnc24/tests/test_service_ietf_remove.py
new file mode 100644
index 0000000000000000000000000000000000000000..f64fc07bfd82c4bd2e50f6a3184f83635cc89cc5
--- /dev/null
+++ b/src/tests/eucnc24/tests/test_service_ietf_remove.py
@@ -0,0 +1,77 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os
+from typing import Dict, Set, Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from .Fixtures import context_client        # pylint: disable=unused-import
+from .Tools import do_rest_delete_request
+
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l3vpn-service.json')
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+
+# pylint: disable=redefined-outer-name, unused-argument
+def test_service_ietf_removal(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+):
+    # Verify the scenario has 1 service and 0 slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 1
+    assert len(response.slice_ids) == 0
+
+    # Check there are no slices
+    response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response)))
+    assert len(response.slices) == 0
+
+    # Check there is 1 service
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+    assert len(response.services) == 1
+
+    service_uuids : Set[str] = set()
+    for service in response.services:
+        service_id = service.service_id
+        assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+        assert service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+
+        response = context_client.ListConnections(service_id)
+        LOGGER.warning('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+            grpc_message_to_json_string(service_id), len(response.connections),
+            grpc_message_to_json_string(response)
+        ))
+        assert len(response.connections) == 1
+
+        service_uuids.add(service_id.service_uuid.uuid)
+
+    # Identify service to delete
+    assert len(service_uuids) == 1
+    service_uuid = set(service_uuids).pop()
+
+    URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service={:s}/'.format(service_uuid)
+    do_rest_delete_request(URL, logger=LOGGER, expected_status_codes={204})
+
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
diff --git a/src/tests/eucnc24/tests/test_service_tfs_create.py b/src/tests/eucnc24/tests/test_service_tfs_create.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e0492f86f21b02c7f047d84c2db49d342710ac7
--- /dev/null
+++ b/src/tests/eucnc24/tests/test_service_tfs_create.py
@@ -0,0 +1,76 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from .Fixtures import context_client, device_client, service_client        # pylint: disable=unused-import
+
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-service.json')
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+
+def test_service_tfs_creation(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client  : DeviceClient,  # pylint: disable=redefined-outer-name
+    service_client : ServiceClient, # pylint: disable=redefined-outer-name
+):
+    # Load descriptors and validate the base scenario
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client,
+        device_client=device_client, service_client=service_client
+    )
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+
+    # Verify the scenario has 1 service and 0 slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 1
+    assert len(response.slice_ids) == 0
+
+    # Check there are no slices
+    response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Slices[{:d}] = {:s}'.format(
+        len(response.slices), grpc_message_to_json_string(response)
+    ))
+    assert len(response.slices) == 0
+
+    # Check there is 1 service
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Services[{:d}] = {:s}'.format(
+        len(response.services), grpc_message_to_json_string(response)
+    ))
+    assert len(response.services) == 1
+
+    for service in response.services:
+        service_id = service.service_id
+        assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+        assert service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+
+        response = context_client.ListConnections(service_id)
+        LOGGER.warning('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+            grpc_message_to_json_string(service_id), len(response.connections),
+            grpc_message_to_json_string(response)
+        ))
+        assert len(response.connections) == 1
diff --git a/src/tests/eucnc24/tests/test_service_tfs_remove.py b/src/tests/eucnc24/tests/test_service_tfs_remove.py
new file mode 100644
index 0000000000000000000000000000000000000000..76fdf0fc7407ce86bbd87806932eb1a4a45c40d1
--- /dev/null
+++ b/src/tests/eucnc24/tests/test_service_tfs_remove.py
@@ -0,0 +1,80 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os
+from typing import Set, Tuple
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, ServiceId, ServiceStatusEnum, ServiceTypeEnum
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Service import json_service_id
+from context.client.ContextClient import ContextClient
+from service.client.ServiceClient import ServiceClient
+from .Fixtures import context_client, service_client        # pylint: disable=unused-import
+
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-service.json')
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+
+def test_service_tfs_removal(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    service_client : ServiceClient, # pylint: disable=redefined-outer-name
+):
+    # Verify the scenario has 1 service and 0 slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 1
+    assert len(response.slice_ids) == 0
+
+    # Check there are no slices
+    response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response)))
+    assert len(response.slices) == 0
+
+    # Check there is 1 service
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+    assert len(response.services) == 1
+
+    context_service_uuids : Set[Tuple[str, str]] = set()
+    for service in response.services:
+        service_id = service.service_id
+        assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+        assert service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+
+        response = context_client.ListConnections(service_id)
+        LOGGER.warning('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+            grpc_message_to_json_string(service_id), len(response.connections),
+            grpc_message_to_json_string(response)
+        ))
+        assert len(response.connections) == 1
+
+        context_uuid = service_id.context_id.context_uuid.uuid
+        service_uuid = service_id.service_uuid.uuid
+        context_service_uuids.add((context_uuid, service_uuid))
+
+    # Identify service to delete
+    assert len(context_service_uuids) == 1
+    context_uuid, service_uuid = set(context_service_uuids).pop()
+
+    # Delete Service
+    service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid))))
+
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0