diff --git a/src/common/tests/test_base_event_collector_retry.py b/src/common/tests/test_base_event_collector_retry.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0ece439b0598b73ff26422367de1afc0d7d36a1
--- /dev/null
+++ b/src/common/tests/test_base_event_collector_retry.py
@@ -0,0 +1,72 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc
+import common.tools.grpc.BaseEventCollector as base_event_collector_module
+from common.proto.context_pb2 import DeviceEvent, EventTypeEnum
+
+
+class _FakeRpcError(grpc.RpcError):
+ def __init__(self, status_code):
+ self._status_code = status_code
+
+ def code(self):
+ return self._status_code
+
+
+class _FakeStream:
+ def __init__(self, events=None):
+ self._events = iter(events or [])
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self._events)
+
+ def cancel(self):
+ pass
+
+
+def _create_device_event():
+ event = DeviceEvent()
+ event.event.event_type = EventTypeEnum.EVENTTYPE_CREATE
+ event.event.timestamp.timestamp = 1.0
+ event.device_id.device_uuid.uuid = 'dev1'
+ return event
+
+
+def test_base_event_collector_retries_if_subscription_creation_fails(monkeypatch):
+ monkeypatch.setattr(base_event_collector_module.time, 'sleep', lambda _seconds: None)
+
+ state = {'calls': 0}
+
+ def subscription_method(_request):
+ state['calls'] += 1
+ if state['calls'] == 1:
+ raise _FakeRpcError(grpc.StatusCode.UNAVAILABLE)
+ if state['calls'] == 2:
+ return _FakeStream(events=[_create_device_event()])
+ raise _FakeRpcError(grpc.StatusCode.CANCELLED)
+
+ collector = base_event_collector_module.BaseEventCollector()
+ collector.install_collector(subscription_method, object())
+
+ collector.start()
+ try:
+ event = collector.get_event(block=True, timeout=1.0)
+ finally:
+ collector.stop()
+
+ assert event.device_id.device_uuid.uuid == 'dev1'
diff --git a/src/common/tools/grpc/BaseEventCollector.py b/src/common/tools/grpc/BaseEventCollector.py
index 637fa59795e6320719017e14c37cc88ecd544dce..fbad2d5bb784b88dad5c2754fb4c3bfb4025df1a 100644
--- a/src/common/tools/grpc/BaseEventCollector.py
+++ b/src/common/tools/grpc/BaseEventCollector.py
@@ -41,8 +41,8 @@ class CollectorThread(threading.Thread):
def run(self) -> None:
while not self._terminate.is_set():
- self._stream = self._subscription_func()
try:
+ self._stream = self._subscription_func()
for event in self._stream:
if self._log_events_received:
str_event = grpc_message_to_json_string(event)
diff --git a/src/context/client/EventsCollector.py b/src/context/client/EventsCollector.py
index 911c3f5bedbb4e83ace178e4d6fae01f13f9b1f6..2e43b735c8c3f4ce35c23e53280639a34cfd56ee 100644
--- a/src/context/client/EventsCollector.py
+++ b/src/context/client/EventsCollector.py
@@ -12,52 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import grpc, logging, queue, threading, time
-from typing import Callable
+import logging
from common.proto.context_pb2 import Empty
-from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.grpc.BaseEventCollector import BaseEventCollector
from context.client.ContextClient import ContextClient
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
-class _Collector(threading.Thread):
- def __init__(
- self, subscription_func : Callable, events_queue = queue.PriorityQueue,
- terminate = threading.Event, log_events_received: bool = False
- ) -> None:
- super().__init__(daemon=False)
- self._subscription_func = subscription_func
- self._events_queue = events_queue
- self._terminate = terminate
- self._log_events_received = log_events_received
- self._stream = None
-
- def cancel(self) -> None:
- if self._stream is None: return
- self._stream.cancel()
-
- def run(self) -> None:
- while not self._terminate.is_set():
- self._stream = self._subscription_func()
- try:
- for event in self._stream:
- if self._log_events_received:
- str_event = grpc_message_to_json_string(event)
- LOGGER.info('[_collect] event: {:s}'.format(str_event))
- timestamp = event.event.timestamp.timestamp
- self._events_queue.put_nowait((timestamp, event))
- except grpc.RpcError as e:
- if e.code() == grpc.StatusCode.UNAVAILABLE:
- LOGGER.info('[_collect] UNAVAILABLE... retrying...')
- time.sleep(0.5)
- continue
- elif e.code() == grpc.StatusCode.CANCELLED:
- break
- else:
- raise # pragma: no cover
-
-class EventsCollector:
+class EventsCollector(BaseEventCollector):
def __init__(
self, context_client : ContextClient,
log_events_received : bool = False,
@@ -69,93 +32,19 @@ class EventsCollector:
activate_slice_collector : bool = True,
activate_connection_collector : bool = True,
) -> None:
- self._events_queue = queue.PriorityQueue()
- self._terminate = threading.Event()
- self._log_events_received = log_events_received
-
- self._context_thread = _Collector(
- lambda: context_client.GetContextEvents(Empty()),
- self._events_queue, self._terminate, self._log_events_received
- ) if activate_context_collector else None
-
- self._topology_thread = _Collector(
- lambda: context_client.GetTopologyEvents(Empty()),
- self._events_queue, self._terminate, self._log_events_received
- ) if activate_topology_collector else None
-
- self._device_thread = _Collector(
- lambda: context_client.GetDeviceEvents(Empty()),
- self._events_queue, self._terminate, self._log_events_received
- ) if activate_device_collector else None
-
- self._link_thread = _Collector(
- lambda: context_client.GetLinkEvents(Empty()),
- self._events_queue, self._terminate, self._log_events_received
- ) if activate_link_collector else None
-
- self._service_thread = _Collector(
- lambda: context_client.GetServiceEvents(Empty()),
- self._events_queue, self._terminate, self._log_events_received
- ) if activate_service_collector else None
-
- self._slice_thread = _Collector(
- lambda: context_client.GetSliceEvents(Empty()),
- self._events_queue, self._terminate, self._log_events_received
- ) if activate_slice_collector else None
-
- self._connection_thread = _Collector(
- lambda: context_client.GetConnectionEvents(Empty()),
- self._events_queue, self._terminate, self._log_events_received
- ) if activate_connection_collector else None
-
- def start(self):
- self._terminate.clear()
-
- if self._context_thread is not None: self._context_thread.start()
- if self._topology_thread is not None: self._topology_thread.start()
- if self._device_thread is not None: self._device_thread.start()
- if self._link_thread is not None: self._link_thread.start()
- if self._service_thread is not None: self._service_thread.start()
- if self._slice_thread is not None: self._slice_thread.start()
- if self._connection_thread is not None: self._connection_thread.start()
-
- def get_event(self, block : bool = True, timeout : float = 0.1):
- try:
- _,event = self._events_queue.get(block=block, timeout=timeout)
- return event
- except queue.Empty: # pylint: disable=catching-non-exception
- return None
-
- def get_events(self, block : bool = True, timeout : float = 0.1, count : int = None):
- events = []
- if count is None:
- while not self._terminate.is_set():
- event = self.get_event(block=block, timeout=timeout)
- if event is None: break
- events.append(event)
- else:
- while len(events) < count:
- if self._terminate.is_set(): break
- event = self.get_event(block=block, timeout=timeout)
- if event is None: continue
- events.append(event)
- return sorted(events, key=lambda e: e.event.timestamp.timestamp)
-
- def stop(self):
- self._terminate.set()
-
- if self._context_thread is not None: self._context_thread.cancel()
- if self._topology_thread is not None: self._topology_thread.cancel()
- if self._device_thread is not None: self._device_thread.cancel()
- if self._link_thread is not None: self._link_thread.cancel()
- if self._service_thread is not None: self._service_thread.cancel()
- if self._slice_thread is not None: self._slice_thread.cancel()
- if self._connection_thread is not None: self._connection_thread.cancel()
-
- if self._context_thread is not None: self._context_thread.join()
- if self._topology_thread is not None: self._topology_thread.join()
- if self._device_thread is not None: self._device_thread.join()
- if self._link_thread is not None: self._link_thread.join()
- if self._service_thread is not None: self._service_thread.join()
- if self._slice_thread is not None: self._slice_thread.join()
- if self._connection_thread is not None: self._connection_thread.join()
+ super().__init__()
+
+ if activate_context_collector:
+ self.install_collector(context_client.GetContextEvents, Empty(), log_events_received)
+ if activate_topology_collector:
+ self.install_collector(context_client.GetTopologyEvents, Empty(), log_events_received)
+ if activate_device_collector:
+ self.install_collector(context_client.GetDeviceEvents, Empty(), log_events_received)
+ if activate_link_collector:
+ self.install_collector(context_client.GetLinkEvents, Empty(), log_events_received)
+ if activate_service_collector:
+ self.install_collector(context_client.GetServiceEvents, Empty(), log_events_received)
+ if activate_slice_collector:
+ self.install_collector(context_client.GetSliceEvents, Empty(), log_events_received)
+ if activate_connection_collector:
+ self.install_collector(context_client.GetConnectionEvents, Empty(), log_events_received)
diff --git a/src/context/tests/test_events_collector_retry.py b/src/context/tests/test_events_collector_retry.py
new file mode 100644
index 0000000000000000000000000000000000000000..e324d8bb8a7bacd4931f16e08faf9eca98444a5b
--- /dev/null
+++ b/src/context/tests/test_events_collector_retry.py
@@ -0,0 +1,84 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc
+import common.tools.grpc.BaseEventCollector as base_event_collector_module
+import context.client.EventsCollector as context_events_module
+from common.proto.context_pb2 import DeviceEvent, EventTypeEnum
+
+
+class _FakeRpcError(grpc.RpcError):
+ def __init__(self, status_code):
+ self._status_code = status_code
+
+ def code(self):
+ return self._status_code
+
+
+class _FakeStream:
+ def __init__(self, events=None):
+ self._events = iter(events or [])
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self._events)
+
+ def cancel(self):
+ pass
+
+
+class _FakeContextClient:
+ def __init__(self):
+ self.calls = 0
+
+ def GetDeviceEvents(self, _request):
+ self.calls += 1
+ if self.calls == 1:
+ raise _FakeRpcError(grpc.StatusCode.UNAVAILABLE)
+ if self.calls == 2:
+ return _FakeStream(events=[_create_device_event()])
+ raise _FakeRpcError(grpc.StatusCode.CANCELLED)
+
+
+def _create_device_event():
+ event = DeviceEvent()
+ event.event.event_type = EventTypeEnum.EVENTTYPE_CREATE
+ event.event.timestamp.timestamp = 1.0
+ event.device_id.device_uuid.uuid = 'dev1'
+ return event
+
+
+def test_events_collector_retries_if_subscription_creation_fails(monkeypatch):
+ monkeypatch.setattr(base_event_collector_module.time, 'sleep', lambda _seconds: None)
+
+ collector = context_events_module.EventsCollector(
+ _FakeContextClient(),
+ activate_context_collector=False,
+ activate_topology_collector=False,
+ activate_device_collector=True,
+ activate_link_collector=False,
+ activate_service_collector=False,
+ activate_slice_collector=False,
+ activate_connection_collector=False,
+ )
+
+ collector.start()
+ try:
+ event = collector.get_event(block=True, timeout=1.0)
+ finally:
+ collector.stop()
+
+ assert event.device_id.device_uuid.uuid == 'dev1'
diff --git a/src/device/service/drivers/gnmi_openconfig/GnmiOpenConfigDriver.py b/src/device/service/drivers/gnmi_openconfig/GnmiOpenConfigDriver.py
index 15204e5bde9efa3fabce00d10cc9c912d0a46373..ac9c16db47ef184f2c24aaeba3648fae1faef8ff 100644
--- a/src/device/service/drivers/gnmi_openconfig/GnmiOpenConfigDriver.py
+++ b/src/device/service/drivers/gnmi_openconfig/GnmiOpenConfigDriver.py
@@ -17,7 +17,7 @@ from typing import Any, Iterator, List, Optional, Tuple, Union
from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
from common.type_checkers.Checkers import chk_type
from device.service.driver_api._Driver import _Driver
-from .GnmiSessionHandler import GnmiSessionHandler
+from .GnmiSessionHandler import GnmiSessionHandler, INITIAL_TARGET_INFO_RESOURCE_KEY
DRIVER_NAME = 'gnmi_openconfig'
METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
@@ -51,7 +51,9 @@ class GnmiOpenConfigDriver(_Driver):
@metered_subclass_method(METRICS_POOL)
def GetInitialConfig(self) -> List[Tuple[str, Any]]:
with self.__lock:
- return []
+ target_facts = self.__handler.target_facts
+ if len(target_facts) == 0: return []
+ return [(INITIAL_TARGET_INFO_RESOURCE_KEY, target_facts)]
@metered_subclass_method(METRICS_POOL)
def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
diff --git a/src/device/service/drivers/gnmi_openconfig/GnmiSessionHandler.py b/src/device/service/drivers/gnmi_openconfig/GnmiSessionHandler.py
index 6eb9271ab436c5ee85dae39795b346406916eee4..4530e6174b15f6aaa56250c2382ffbc1af031c79 100644
--- a/src/device/service/drivers/gnmi_openconfig/GnmiSessionHandler.py
+++ b/src/device/service/drivers/gnmi_openconfig/GnmiSessionHandler.py
@@ -19,6 +19,7 @@ from common.type_checkers.Checkers import chk_float, chk_length, chk_string, chk
from .gnmi.gnmi_pb2_grpc import gNMIStub
from .gnmi.gnmi_pb2 import Encoding, GetRequest, SetRequest, UpdateResult # pylint: disable=no-name-in-module
from .handlers import ALL_RESOURCE_KEYS, compose, get_path, parse
+from .handlers.Interface import EOS_TAGGED_L3_REPLACE_FIELD
from .handlers.YangHandler import YangHandler
from .tools.Capabilities import check_capabilities
from .tools.Channel import get_grpc_channel
@@ -27,6 +28,91 @@ from .tools.Subscriptions import Subscriptions
from .tools.Value import decode_value #, value_exists
from .MonitoringThread import MonitoringThread
+INITIAL_TARGET_INFO_RESOURCE_KEY = '_info/gnmi/target'
+
+DISCOVERY_PATH_SYSTEM_STATE = '/openconfig-system:system/state'
+DISCOVERY_PATH_CHASSIS_STATE = '/openconfig-platform:components/component[name=Chassis]/state'
+
+
+def _normalize_hint(value: Any) -> Optional[str]:
+ if not isinstance(value, str): return None
+ value = value.strip()
+ if len(value) == 0: return None
+ return value.lower()
+
+def _extract_target_facts_from_system_state(system_state : Dict[str, Any]) -> Dict[str, str]:
+ facts : Dict[str, str] = dict()
+ hostname = system_state.get('openconfig-system:hostname')
+ if isinstance(hostname, str) and len(hostname) > 0:
+ facts['hostname'] = hostname
+
+ software_version = system_state.get('openconfig-system:software-version')
+ if isinstance(software_version, str) and len(software_version) > 0:
+ facts['software_version'] = software_version
+
+ return facts
+
+def _extract_target_facts_from_chassis_state(chassis_state : Dict[str, Any]) -> Dict[str, str]:
+ facts : Dict[str, str] = dict()
+
+ vendor = chassis_state.get('openconfig-platform:mfg-name')
+ if isinstance(vendor, str) and len(vendor) > 0:
+ facts['vendor'] = vendor
+
+ model = chassis_state.get('openconfig-platform:part-no')
+ if isinstance(model, str) and len(model) > 0:
+ facts['model'] = model
+
+ description = chassis_state.get('openconfig-platform:description')
+ if isinstance(description, str) and len(description) > 0:
+ facts['description'] = description
+
+ serial_no = chassis_state.get('openconfig-platform:serial-no')
+ if isinstance(serial_no, str) and len(serial_no) > 0:
+ facts['serial_no'] = serial_no
+
+ return facts
+
+
+def _is_arista_eos_target(settings: Dict[str, Any]) -> bool:
+ hints = []
+ target_facts = settings.get('_target_facts', {})
+ if isinstance(target_facts, dict):
+ for value in target_facts.values():
+ normalized_value = _normalize_hint(value)
+ if normalized_value is not None:
+ hints.append(normalized_value)
+ for key in ('vendor', 'platform', 'type', 'device_type', 'kind', 'model'):
+ normalized_value = _normalize_hint(settings.get(key))
+ if normalized_value is not None:
+ hints.append(normalized_value)
+ hint_blob = ' '.join(hints)
+ return any(token in hint_blob for token in ('arista', 'ceos', 'eos'))
+
+
+def _is_tagged_l3_access_subinterface(resource_key: str, resource_value: Dict[str, Any]) -> bool:
+ if not resource_key.startswith('/interface['):
+ return False
+ if resource_value.get('vlan_id') is None:
+ return False
+ if resource_value.get('index') != 0:
+ return False
+ if resource_value.get('address_ip') is None or resource_value.get('address_prefix') is None:
+ return False
+ if resource_value.get('type') != 'l3ipvlan':
+ return False
+ return True
+
+
+def _prepare_set_operation(
+ settings: Dict[str, Any], resource_key: str, resource_value: Dict[str, Any]
+) -> Tuple[str, Dict[str, Any]]:
+ prepared_value = copy.deepcopy(resource_value)
+ if _is_arista_eos_target(settings) and _is_tagged_l3_access_subinterface(resource_key, prepared_value):
+ prepared_value[EOS_TAGGED_L3_REPLACE_FIELD] = True
+ return 'replace', prepared_value
+ return 'update', prepared_value
+
class GnmiSessionHandler:
def __init__(self, address : str, port : int, settings : Dict, logger : logging.Logger) -> None:
self._address = address
@@ -45,6 +131,7 @@ class GnmiSessionHandler:
self._subscriptions = Subscriptions()
self._in_subscriptions = queue.Queue()
self._out_samples = queue.Queue()
+ self._target_facts : Dict[str, Any] = dict()
def __del__(self) -> None:
self._logger.info('Destroying YangValidator...')
@@ -64,11 +151,64 @@ class GnmiSessionHandler:
@property
def out_samples(self): return self._out_samples
+ @property
+ def target_facts(self): return copy.deepcopy(self._target_facts)
+
+ def _merge_target_facts(self, target_facts : Optional[Dict[str, Any]]) -> None:
+ if not isinstance(target_facts, dict): return
+ for key, value in target_facts.items():
+ if value is None: continue
+ if isinstance(value, str) and len(value) == 0: continue
+ self._target_facts[key] = value
+ self._settings['_target_facts'] = copy.deepcopy(self._target_facts)
+
+ def _get_discovery_value(self, str_path : str) -> Optional[Dict[str, Any]]:
+ metadata = [('username', self._username), ('password', self._password)]
+
+ get_request = GetRequest()
+ get_request.type = GetRequest.DataType.STATE
+ get_request.encoding = Encoding.JSON_IETF
+ get_request.path.append(path_from_string(str_path))
+
+ try:
+ get_reply = self._stub.Get(get_request, metadata=metadata, timeout=30)
+ except grpc.RpcError:
+ self._logger.debug('Discovery probe failed for path=%s', str_path, exc_info=True)
+ return None
+
+ for notification in get_reply.notification:
+ for update in notification.update:
+ try:
+ value = decode_value(update.val)
+ except Exception: # pylint: disable=broad-except
+ self._logger.debug('Discovery decode failed for path=%s', str_path, exc_info=True)
+ continue
+ if isinstance(value, dict):
+ return value
+
+ return None
+
+ def _discover_target_facts(self, capability_info : Dict[str, Any]) -> None:
+ self._merge_target_facts(capability_info.get('target_facts'))
+
+ gnmi_version = capability_info.get('gnmi_version')
+ if isinstance(gnmi_version, str) and len(gnmi_version) > 0:
+ self._merge_target_facts({'gnmi_version': gnmi_version})
+
+ system_state = self._get_discovery_value(DISCOVERY_PATH_SYSTEM_STATE)
+ if isinstance(system_state, dict):
+ self._merge_target_facts(_extract_target_facts_from_system_state(system_state))
+
+ chassis_state = self._get_discovery_value(DISCOVERY_PATH_CHASSIS_STATE)
+ if isinstance(chassis_state, dict):
+ self._merge_target_facts(_extract_target_facts_from_chassis_state(chassis_state))
+
def connect(self):
with self._lock:
self._channel = get_grpc_channel(self._address, self._port, self._use_tls, self._logger)
self._stub = gNMIStub(self._channel)
- check_capabilities(self._stub, self._username, self._password, timeout=120)
+ capability_info = check_capabilities(self._stub, self._username, self._password, timeout=120)
+ self._discover_target_facts(capability_info)
self._monit_thread = MonitoringThread(
self._stub, self._logger, self._settings, self._in_subscriptions, self._out_samples)
self._monit_thread.start()
@@ -178,16 +318,17 @@ class GnmiSessionHandler:
#if resource_tuple is None: continue
#_, value, exists, operation_done = resource_tuple
if isinstance(resource_value, str): resource_value = json.loads(resource_value)
- str_path, str_data = compose(resource_key, resource_value, self._yang_handler, delete=False)
+ operation, prepared_value = _prepare_set_operation(self._settings, resource_key, resource_value)
+ str_path, str_data = compose(resource_key, prepared_value, self._yang_handler, delete=False)
if str_path is None: continue # nothing to set
#self._logger.info('---3')
#self._logger.info(str(str_path))
#self._logger.info(str(str_data))
- set_request_list = set_request.update #if exists else set_request.replace
+ set_request_list = set_request.replace if operation == 'replace' else set_request.update
set_request_entry = set_request_list.add()
set_request_entry.path.CopyFrom(path_from_string(str_path))
set_request_entry.val.json_val = str_data.encode('UTF-8')
- resources_requested.append((resource_key, resource_value))
+ resources_requested.append((len(resources_requested), resource_key, str_path, operation))
self._logger.debug('set_request={:s}'.format(grpc_message_to_json_string(set_request)))
metadata = [('username', self._username), ('password', self._password)]
@@ -195,13 +336,34 @@ class GnmiSessionHandler:
set_reply = self._stub.Set(set_request, metadata=metadata, timeout=timeout)
self._logger.debug('set_reply={:s}'.format(grpc_message_to_json_string(set_reply)))
- results = []
- for (resource_key, resource_value), update_result in zip(resources_requested, set_reply.response):
+ results = [
+ (resource_key, Exception('Not Processed'))
+ for _, resource_key, _, _ in resources_requested
+ ]
+ pending_requests = list(resources_requested)
+ for update_result in set_reply.response:
operation = update_result.op
+ str_path = path_to_string(update_result.path)
+ expected_operation = None
if operation == UpdateResult.UPDATE:
- results.append((resource_key, True))
- else:
- results.append((resource_key, Exception('Unexpected')))
+ expected_operation = 'update'
+ elif operation == UpdateResult.REPLACE:
+ expected_operation = 'replace'
+
+ matched = False
+ if expected_operation is not None:
+ for j, (resource_idx, resource_key, resource_path, requested_operation) in enumerate(pending_requests):
+ if resource_path == str_path and requested_operation == expected_operation:
+ results[resource_idx] = (resource_key, True)
+ pending_requests.pop(j)
+ matched = True
+ break
+
+ if not matched:
+ self._logger.warning(
+ 'Unexpected Set reply operation=%s path=%s pending=%s',
+ str(operation), str(str_path), str(pending_requests)
+ )
#str_path = path_to_string(update_result.path)
#resource_tuple = resource_tuples.get(str_path)
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py b/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py
index d59bff7a61cd1ef468f8220456bf7434b16bb0b6..49441e106afb015fcfbddd5570bbf95d274f84e6 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/Interface.py
@@ -20,40 +20,120 @@ from .YangHandler import YangHandler
LOGGER = logging.getLogger(__name__)
+MIN_MTU = 68
+EOS_TAGGED_L3_REPLACE_FIELD = '_eos_tagged_l3_replace'
+
+
+def _normalize_interface_type(if_type: str, sif_index: int) -> str:
+ if if_type == 'l3ipvlan' and sif_index is not None:
+ return 'iana-if-type:ethernetCsmacd'
+ if ':' not in if_type:
+ return 'iana-if-type:{:s}'.format(if_type)
+ return if_type
+
class InterfaceHandler(_Handler):
def get_resource_key(self) -> str: return '/interface/subinterface'
def get_path(self) -> str: return '/openconfig-interfaces:interfaces'
+ @staticmethod
+ def _create_subinterface(
+ yang_sifs: libyang.DContainer, sif_index: int, enabled: bool = None,
+ vlan_id: int = None, address_ip: str = None, address_prefix: int = None
+ ) -> libyang.DContainer:
+ yang_sif_path = 'subinterface[index="{:d}"]'.format(sif_index)
+ yang_sif: libyang.DContainer = yang_sifs.create_path(yang_sif_path)
+ yang_sif.create_path('config/index', sif_index)
+ if enabled is not None:
+ yang_sif.create_path('config/enabled', enabled)
+
+ if vlan_id is not None:
+ yang_subif_vlan : libyang.DContainer = yang_sif.create_path('openconfig-vlan:vlan')
+ yang_subif_vlan.create_path('match/single-tagged/config/vlan-id', vlan_id)
+
+ yang_ipv4 : libyang.DContainer = yang_sif.create_path('openconfig-if-ip:ipv4')
+ if enabled is not None:
+ yang_ipv4.create_path('config/enabled', enabled)
+
+ if address_ip is not None and address_prefix is not None:
+ yang_ipv4_addrs : libyang.DContainer = yang_ipv4.create_path('addresses')
+ yang_ipv4_addr_path = 'address[ip="{:s}"]'.format(address_ip)
+ yang_ipv4_addr : libyang.DContainer = yang_ipv4_addrs.create_path(yang_ipv4_addr_path)
+ yang_ipv4_addr.create_path('config/ip', address_ip)
+ yang_ipv4_addr.create_path('config/prefix-length', address_prefix)
+
+ return yang_sif
+
def compose(
self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False
) -> Tuple[str, str]:
- if_name = get_str(resource_value, 'name' ) # ethernet-1/1
- sif_index = get_int(resource_value, 'index', 0) # 0
+ if_name = get_str(resource_value, 'name' ) # ethernet-1/1
+ sif_index = get_int(resource_value, 'index', None) # 0
+ vlan_id = get_int(resource_value, 'vlan_id', None)
+ eos_tagged_l3_replace = get_bool(resource_value, EOS_TAGGED_L3_REPLACE_FIELD, False)
if delete:
- PATH_TMPL = '/interfaces/interface[name={:s}]/subinterfaces/subinterface[index={:d}]'
- str_path = PATH_TMPL.format(if_name, sif_index)
- str_data = json.dumps({})
+ if eos_tagged_l3_replace:
+ root_node : libyang.DContainer = yang_handler.get_data_path(
+ '/openconfig-interfaces:interfaces'
+ )
+ str_path = '/interfaces/interface[name={:s}]'.format(if_name)
+ yang_if = root_node.find_path('/'.join([
+ '',
+ 'openconfig-interfaces:interfaces',
+ 'interface[name="{:s}"]'.format(if_name),
+ ]))
+ if yang_if is not None:
+ yang_if.unlink()
+ yang_if.free()
+ str_data = json.dumps({})
+ return str_path, str_data
+
+ if sif_index is None:
+ return None, None
root_node : libyang.DContainer = yang_handler.get_data_path(
'/openconfig-interfaces:interfaces'
)
- yang_sif = root_node.find_path('/'.join([
- '', # add slash at the beginning
- 'openconfig-interfaces:interfaces',
- 'interface[name="{:s}"]'.format(if_name),
- 'subinterfaces',
- 'subinterface[index="{:d}"]'.format(sif_index),
- ]))
+
+ address_ip = get_str(resource_value, 'address_ip', None)
+ if address_ip is None:
+ PATH_TMPL = '/interfaces/interface[name={:s}]/subinterfaces/subinterface[index={:d}]'
+ str_path = PATH_TMPL.format(if_name, sif_index)
+
+ yang_sif = root_node.find_path('/'.join([
+ '', # add slash at the beginning
+ 'openconfig-interfaces:interfaces',
+ 'interface[name="{:s}"]'.format(if_name),
+ 'subinterfaces',
+ 'subinterface[index="{:d}"]'.format(sif_index),
+ ]))
+ else:
+ PATH_TMPL = (
+ '/interfaces/interface[name={:s}]/subinterfaces/subinterface[index={:d}]'
+ '/openconfig-if-ip:ipv4/addresses/address[ip={:s}]'
+ )
+ str_path = PATH_TMPL.format(if_name, sif_index, address_ip)
+
+ yang_sif = root_node.find_path('/'.join([
+ '', # add slash at the beginning
+ 'openconfig-interfaces:interfaces',
+ 'interface[name="{:s}"]'.format(if_name),
+ 'subinterfaces',
+ 'subinterface[index="{:d}"]'.format(sif_index),
+ 'openconfig-if-ip:ipv4',
+ 'addresses',
+ 'address[ip="{:s}"]'.format(address_ip)
+ ]))
+
if yang_sif is not None:
yang_sif.unlink()
yang_sif.free()
+ str_data = json.dumps({})
return str_path, str_data
- enabled = get_bool(resource_value, 'enabled', True) # True/False
- #if_type = get_str (resource_value, 'type' ) # 'l3ipvlan'
- vlan_id = get_int (resource_value, 'vlan_id', ) # 127
+ enabled = get_bool(resource_value, 'enabled' ) # True/False
+ if_type = get_str (resource_value, 'type' ) # 'l3ipvlan'
address_ip = get_str (resource_value, 'address_ip' ) # 172.16.0.1
address_prefix = get_int (resource_value, 'address_prefix') # 24
mtu = get_int (resource_value, 'mtu' ) # 1500
@@ -62,28 +142,34 @@ class InterfaceHandler(_Handler):
yang_if_path = 'interface[name="{:s}"]'.format(if_name)
yang_if : libyang.DContainer = yang_ifs.create_path(yang_if_path)
yang_if.create_path('config/name', if_name )
- if enabled is not None: yang_if.create_path('config/enabled', enabled)
- if mtu is not None: yang_if.create_path('config/mtu', mtu)
+ if if_type is not None:
+ yang_if.create_path('config/type', _normalize_interface_type(if_type, sif_index))
+ if enabled is not None:
+ yang_if.create_path('config/enabled', enabled)
+
+ if mtu is not None and mtu >= MIN_MTU:
+ yang_if.create_path('config/mtu', mtu)
+
+ if sif_index is None:
+ str_path = '/interfaces/interface[name={:s}]'.format(if_name)
+ str_data = yang_if.print_mem('json')
+ json_data = json.loads(str_data)
+ json_data = json_data['openconfig-interfaces:interface'][0]
+ str_data = json.dumps(json_data)
+ return str_path, str_data
yang_sifs : libyang.DContainer = yang_if.create_path('subinterfaces')
- yang_sif_path = 'subinterface[index="{:d}"]'.format(sif_index)
- yang_sif : libyang.DContainer = yang_sifs.create_path(yang_sif_path)
- yang_sif.create_path('config/index', sif_index)
- if enabled is not None: yang_sif.create_path('config/enabled', enabled)
-
- if vlan_id is not None:
- yang_subif_vlan : libyang.DContainer = yang_sif.create_path('openconfig-vlan:vlan')
- yang_subif_vlan.create_path('match/single-tagged/config/vlan-id', vlan_id)
-
- yang_ipv4 : libyang.DContainer = yang_sif.create_path('openconfig-if-ip:ipv4')
- if enabled is not None: yang_ipv4.create_path('config/enabled', enabled)
-
- if address_ip is not None and address_prefix is not None:
- yang_ipv4_addrs : libyang.DContainer = yang_ipv4.create_path('addresses')
- yang_ipv4_addr_path = 'address[ip="{:s}"]'.format(address_ip)
- yang_ipv4_addr : libyang.DContainer = yang_ipv4_addrs.create_path(yang_ipv4_addr_path)
- yang_ipv4_addr.create_path('config/ip', address_ip)
- yang_ipv4_addr.create_path('config/prefix-length', address_prefix)
+ if eos_tagged_l3_replace and sif_index == 0 and vlan_id is not None:
+ self._create_subinterface(yang_sifs, 0, enabled=enabled)
+ self._create_subinterface(
+ yang_sifs, vlan_id, enabled=enabled, vlan_id=vlan_id,
+ address_ip=address_ip, address_prefix=address_prefix
+ )
+ else:
+ self._create_subinterface(
+ yang_sifs, sif_index, enabled=enabled, vlan_id=vlan_id,
+ address_ip=address_ip, address_prefix=address_prefix
+ )
str_path = '/interfaces/interface[name={:s}]'.format(if_name)
str_data = yang_if.print_mem('json')
@@ -121,7 +207,6 @@ class InterfaceHandler(_Handler):
_interface = {
'name' : interface_name,
'type' : interface_type,
- 'mtu' : interface_state['mtu'],
'admin-status' : interface_state['admin-status'],
'oper-status' : interface_state['oper-status'],
'management' : interface_state['management'],
@@ -136,6 +221,9 @@ class InterfaceHandler(_Handler):
_interface['hardware-port'] = interface_state['hardware-port']
if 'transceiver' in interface_state:
_interface['transceiver'] = interface_state['transceiver']
+ if 'mtu' in interface_state:
+ mtu = interface_state['mtu']
+ if mtu > 0: _interface['mtu'] = mtu
entry_interface_key = '/interface[{:s}]'.format(interface_name)
entries.append((entry_interface_key, _interface))
@@ -164,6 +252,12 @@ class InterfaceHandler(_Handler):
_subinterface['name'] = subinterface_state['name']
if 'enabled' in subinterface_state:
_subinterface['enabled'] = subinterface_state['enabled']
+ if 'mtu' in subinterface_state:
+ mtu = subinterface_state['mtu']
+ if mtu > 0:
+ _subinterface['mtu'] = mtu
+ if 'mtu' not in _interface:
+ _interface['mtu'] = mtu
if 'vlan' in subinterface:
vlan = subinterface['vlan']
diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceStaticRoute.py b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceStaticRoute.py
index 78d77c8cdeb9b47107941e55e013da6d99bdd769..4379fd08a1e5e56c3eb5a091e7597311b8707252 100644
--- a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceStaticRoute.py
+++ b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceStaticRoute.py
@@ -40,6 +40,23 @@ class NetworkInstanceStaticRouteHandler(_Handler):
PATH_TMPL = '/network-instances/network-instance[name={:s}]/protocols'
PATH_TMPL += '/protocol[identifier={:s}][name={:s}]/static-routes/static[prefix={:s}]'
str_path = PATH_TMPL.format(ni_name, identifier, proto_name, prefix)
+
+ root_node : libyang.DContainer = yang_handler.get_data_path(
+ '/openconfig-network-instance:network-instances'
+ )
+ yang_ni_pr_sr_path = root_node.find_path('/'.join([
+ '', # add slash at the beginning
+ 'openconfig-network-instance:network-instances',
+ 'network-instance[name="{:s}"]'.format(ni_name),
+ 'protocols',
+ 'protocol[identifier="{:s}"][name="{:s}"]'.format(identifier, proto_name),
+ 'static-routes',
+ 'static[prefix="{:s}"]'.format(prefix)
+ ]))
+ if yang_ni_pr_sr_path is not None:
+ yang_ni_pr_sr_path.unlink()
+ yang_ni_pr_sr_path.free()
+
str_data = json.dumps({})
return str_path, str_data
diff --git a/src/device/service/drivers/gnmi_openconfig/tools/Capabilities.py b/src/device/service/drivers/gnmi_openconfig/tools/Capabilities.py
index 1e6023dcfcd5f7f295dedf2b361bbe744d0ceceb..a48adc4fb6dde9a0a36a5a77063446a07421dc3e 100644
--- a/src/device/service/drivers/gnmi_openconfig/tools/Capabilities.py
+++ b/src/device/service/drivers/gnmi_openconfig/tools/Capabilities.py
@@ -12,14 +12,45 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Optional, Set, Union
+from typing import Any, Dict, List, Optional, Set
from common.tools.grpc.Tools import grpc_message_to_json
from ..gnmi.gnmi_pb2 import CapabilityRequest # pylint: disable=no-name-in-module
from ..gnmi.gnmi_pb2_grpc import gNMIStub
+def _normalize_string(value : Any) -> Optional[str]:
+ if not isinstance(value, str): return None
+ value = value.strip()
+ if len(value) == 0: return None
+ return value
+
+def _infer_target_facts(supported_models : List[Dict[str, Any]]) -> Dict[str, str]:
+ facts : Dict[str, str] = dict()
+
+ names = list()
+ organizations = list()
+ for supported_model in supported_models:
+ name = _normalize_string(supported_model.get('name'))
+ if name is not None: names.append(name)
+
+ organization = _normalize_string(supported_model.get('organization'))
+ if organization is not None: organizations.append(organization)
+
+ signatures = [*names, *organizations]
+ signature_blob = ' '.join(signatures).lower()
+
+ if 'arista' in signature_blob:
+ facts['vendor'] = 'Arista'
+ if any(
+ ('eos' in name.lower()) or name.lower().startswith('arista-')
+ for name in names
+ ):
+ facts['platform'] = 'EOS'
+
+ return facts
+
def check_capabilities(
stub : gNMIStub, username : str, password : str, timeout : Optional[int] = None
-) -> Set[Union[str, int]]:
+) -> Dict[str, Any]:
metadata = [('username', username), ('password', password)]
req = CapabilityRequest()
reply = stub.Capabilities(req, metadata=metadata, timeout=timeout)
@@ -30,11 +61,11 @@ def check_capabilities(
if gnmi_version is None or gnmi_version != '0.7.0':
raise Exception('Unsupported gNMI version: {:s}'.format(str(gnmi_version)))
- #supported_models = {
- # supported_model['name']: supported_model['version']
- # for supported_model in data.get('supported_models', [])
- #}
- # TODO: check supported models and versions
+ supported_models = [
+ supported_model
+ for supported_model in data.get('supported_models', [])
+ if isinstance(supported_model, dict)
+ ]
supported_encodings = {
supported_encoding
@@ -47,3 +78,10 @@ def check_capabilities(
if 'JSON_IETF' not in supported_encodings:
# pylint: disable=broad-exception-raised
raise Exception('JSON_IETF encoding not supported')
+
+ return {
+ 'gnmi_version': gnmi_version,
+ 'supported_models': supported_models,
+ 'supported_encodings': sorted(supported_encodings),
+ 'target_facts': _infer_target_facts(supported_models),
+ }
diff --git a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig_discovery.py b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig_discovery.py
new file mode 100644
index 0000000000000000000000000000000000000000..0808cc8e5c52d535ecc0d66167c21512634e3655
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig_discovery.py
@@ -0,0 +1,65 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver
+from device.service.drivers.gnmi_openconfig.GnmiSessionHandler import INITIAL_TARGET_INFO_RESOURCE_KEY
+from device.service.drivers.gnmi_openconfig.gnmi.gnmi_pb2 import CapabilityResponse, Encoding, ModelData
+from device.service.drivers.gnmi_openconfig.tools.Capabilities import check_capabilities
+
+
+class _MockGnmiStub:
+ def __init__(self, reply):
+ self._reply = reply
+
+ def Capabilities(self, req, metadata=None, timeout=None): # pylint: disable=unused-argument
+ return self._reply
+
+
+def test_check_capabilities_extracts_arista_target_facts() -> None:
+ reply = CapabilityResponse(
+ gNMI_version='0.7.0',
+ supported_models=[
+ ModelData(name='openconfig-system', organization='OpenConfig working group', version='2.0.0'),
+ ModelData(name='arista-exp-eos', organization='Arista Networks ', version=''),
+ ],
+ supported_encodings=[Encoding.JSON_IETF, Encoding.JSON],
+ )
+
+ capability_info = check_capabilities(_MockGnmiStub(reply), 'admin', 'admin', timeout=120)
+
+ assert capability_info['gnmi_version'] == '0.7.0'
+ assert capability_info['target_facts']['vendor'] == 'Arista'
+ assert capability_info['target_facts']['platform'] == 'EOS'
+
+
+def test_driver_get_initial_config_returns_target_facts() -> None:
+ driver = GnmiOpenConfigDriver('127.0.0.1', 6030, username='admin', password='admin')
+ driver._GnmiOpenConfigDriver__handler._target_facts = { # pylint: disable=protected-access
+ 'vendor': 'Arista',
+ 'platform': 'EOS',
+ 'model': 'cEOSLab',
+ 'software_version': '4.32.2F',
+ }
+
+ initial_config = driver.GetInitialConfig()
+
+ assert initial_config == [(
+ INITIAL_TARGET_INFO_RESOURCE_KEY,
+ {
+ 'vendor': 'Arista',
+ 'platform': 'EOS',
+ 'model': 'cEOSLab',
+ 'software_version': '4.32.2F',
+ }
+ )]
diff --git a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig_interface_handler.py b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig_interface_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..e007b91862330b0fb4c67e0113244f978e9efc88
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig_interface_handler.py
@@ -0,0 +1,179 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+from device.service.drivers.gnmi_openconfig.handlers.Interface import (
+ EOS_TAGGED_L3_REPLACE_FIELD,
+ InterfaceHandler,
+)
+from device.service.drivers.gnmi_openconfig.handlers.YangHandler import YangHandler
+
+
+def test_compose_tagged_l3_subinterface_sets_parent_ethernet_type() -> None:
+ handler = InterfaceHandler()
+ yang_handler = YangHandler()
+ try:
+ _, str_data = handler.compose(
+ '/interface[Ethernet11]/subinterface[125]',
+ {
+ 'name': 'Ethernet11',
+ 'type': 'l3ipvlan',
+ 'index': 125,
+ 'vlan_id': 125,
+ 'address_ip': '172.17.1.1',
+ 'address_prefix': 24,
+ 'enabled': True,
+ },
+ yang_handler,
+ delete=False,
+ )
+ finally:
+ yang_handler.destroy()
+
+ json_data = json.loads(str_data)
+ assert json_data['config']['type'] == 'iana-if-type:ethernetCsmacd'
+
+ subinterface = json_data['subinterfaces']['subinterface'][0]
+ assert subinterface['index'] == 125
+ assert subinterface['openconfig-vlan:vlan']['match']['single-tagged']['config']['vlan-id'] == 125
+ address = subinterface['openconfig-if-ip:ipv4']['addresses']['address'][0]
+ assert address['config']['ip'] == '172.17.1.1'
+ assert address['config']['prefix-length'] == 24
+
+
+def test_compose_eos_tagged_l3_replace_emits_subif_zero_and_vlan_subif() -> None:
+ handler = InterfaceHandler()
+ yang_handler = YangHandler()
+ try:
+ _, str_data = handler.compose(
+ '/interface[Ethernet11]/subinterface[0]',
+ {
+ 'name': 'Ethernet11',
+ 'type': 'l3ipvlan',
+ 'index': 0,
+ 'vlan_id': 125,
+ 'address_ip': '172.17.1.1',
+ 'address_prefix': 24,
+ 'enabled': True,
+ EOS_TAGGED_L3_REPLACE_FIELD: True,
+ },
+ yang_handler,
+ delete=False,
+ )
+ finally:
+ yang_handler.destroy()
+
+ json_data = json.loads(str_data)
+ assert json_data['config']['type'] == 'iana-if-type:ethernetCsmacd'
+
+ subinterfaces = json_data['subinterfaces']['subinterface']
+ assert [subinterface['index'] for subinterface in subinterfaces] == [0, 125]
+
+ subinterface_zero = subinterfaces[0]
+ assert subinterface_zero['openconfig-if-ip:ipv4']['config']['enabled'] is True
+
+ subinterface_vlan = subinterfaces[1]
+ assert subinterface_vlan['openconfig-vlan:vlan']['match']['single-tagged']['config']['vlan-id'] == 125
+ address = subinterface_vlan['openconfig-if-ip:ipv4']['addresses']['address'][0]
+ assert address['config']['ip'] == '172.17.1.1'
+ assert address['config']['prefix-length'] == 24
+
+
+def test_delete_subinterface_zero_unlinks_it_from_shared_yang_state() -> None:
+ handler = InterfaceHandler()
+ yang_handler = YangHandler()
+ try:
+ handler.compose(
+ '/interface[Ethernet10]/subinterface[0]',
+ {
+ 'name': 'Ethernet10',
+ 'type': 'l3ipvlan',
+ 'index': 0,
+ 'address_ip': '172.16.1.1',
+ 'address_prefix': 24,
+ 'enabled': True,
+ },
+ yang_handler,
+ delete=False,
+ )
+
+ str_path, _ = handler.compose(
+ '/interface[Ethernet10]/subinterface[0]',
+ {
+ 'name': 'Ethernet10',
+ 'index': 0,
+ },
+ yang_handler,
+ delete=True,
+ )
+
+ root_node = yang_handler.get_data_path('/openconfig-interfaces:interfaces')
+ yang_subif = root_node.find_path('/'.join([
+ '',
+ 'openconfig-interfaces:interfaces',
+ 'interface[name="Ethernet10"]',
+ 'subinterfaces',
+ 'subinterface[index="0"]',
+ ]))
+ finally:
+ yang_handler.destroy()
+
+ assert str_path == '/interfaces/interface[name=Ethernet10]/subinterfaces/subinterface[index=0]'
+ assert yang_subif is None
+
+
+def test_delete_eos_tagged_l3_replace_unlinks_full_interface_from_shared_yang_state() -> None:
+ handler = InterfaceHandler()
+ yang_handler = YangHandler()
+ try:
+ handler.compose(
+ '/interface[Ethernet11]/subinterface[0]',
+ {
+ 'name': 'Ethernet11',
+ 'type': 'l3ipvlan',
+ 'index': 0,
+ 'vlan_id': 125,
+ 'address_ip': '172.17.1.1',
+ 'address_prefix': 24,
+ 'enabled': True,
+ EOS_TAGGED_L3_REPLACE_FIELD: True,
+ },
+ yang_handler,
+ delete=False,
+ )
+
+ str_path, _ = handler.compose(
+ '/interface[Ethernet11]/subinterface[0]',
+ {
+ 'name': 'Ethernet11',
+ 'index': 0,
+ 'vlan_id': 125,
+ EOS_TAGGED_L3_REPLACE_FIELD: True,
+ },
+ yang_handler,
+ delete=True,
+ )
+
+ root_node = yang_handler.get_data_path('/openconfig-interfaces:interfaces')
+ yang_if = root_node.find_path('/'.join([
+ '',
+ 'openconfig-interfaces:interfaces',
+ 'interface[name="Ethernet11"]',
+ ]))
+ finally:
+ yang_handler.destroy()
+
+ assert str_path == '/interfaces/interface[name=Ethernet11]'
+ assert yang_if is None
diff --git a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig_session_handler.py b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig_session_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed7f31b677c4e2c625c3e93752140b03cd7b045d
--- /dev/null
+++ b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_openconfig_session_handler.py
@@ -0,0 +1,123 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from device.service.drivers.gnmi_openconfig.GnmiSessionHandler import (
+ _extract_target_facts_from_chassis_state,
+ _extract_target_facts_from_system_state,
+ _prepare_set_operation,
+)
+from device.service.drivers.gnmi_openconfig.handlers.Interface import EOS_TAGGED_L3_REPLACE_FIELD
+
+
+def test_prepare_set_operation_uses_replace_for_arista_tagged_l3_access() -> None:
+ operation, prepared_value = _prepare_set_operation(
+ {'vendor': 'ARISTA'},
+ '/interface[Ethernet11]/subinterface[0]',
+ {
+ 'name': 'Ethernet11',
+ 'type': 'l3ipvlan',
+ 'index': 0,
+ 'vlan_id': 125,
+ 'address_ip': '172.17.1.1',
+ 'address_prefix': 24,
+ 'enabled': True,
+ },
+ )
+
+ assert operation == 'replace'
+ assert prepared_value[EOS_TAGGED_L3_REPLACE_FIELD] is True
+
+
+def test_prepare_set_operation_keeps_update_for_untagged_interface() -> None:
+ operation, prepared_value = _prepare_set_operation(
+ {'vendor': 'ARISTA'},
+ '/interface[Ethernet10]/subinterface[0]',
+ {
+ 'name': 'Ethernet10',
+ 'type': 'l3ipvlan',
+ 'index': 0,
+ 'address_ip': '172.16.1.1',
+ 'address_prefix': 24,
+ 'enabled': True,
+ },
+ )
+
+ assert operation == 'update'
+ assert EOS_TAGGED_L3_REPLACE_FIELD not in prepared_value
+
+
+def test_prepare_set_operation_keeps_update_for_non_arista_tagged_l3_access() -> None:
+ operation, prepared_value = _prepare_set_operation(
+ {'vendor': 'NOKIA'},
+ '/interface[Ethernet11]/subinterface[0]',
+ {
+ 'name': 'Ethernet11',
+ 'type': 'l3ipvlan',
+ 'index': 0,
+ 'vlan_id': 125,
+ 'address_ip': '172.17.1.1',
+ 'address_prefix': 24,
+ 'enabled': True,
+ },
+ )
+
+ assert operation == 'update'
+ assert EOS_TAGGED_L3_REPLACE_FIELD not in prepared_value
+
+
+def test_prepare_set_operation_uses_replace_for_discovered_arista_target() -> None:
+ operation, prepared_value = _prepare_set_operation(
+ {'_target_facts': {'vendor': 'Arista', 'platform': 'EOS', 'model': 'cEOSLab'}},
+ '/interface[Ethernet11]/subinterface[0]',
+ {
+ 'name': 'Ethernet11',
+ 'type': 'l3ipvlan',
+ 'index': 0,
+ 'vlan_id': 125,
+ 'address_ip': '172.17.1.1',
+ 'address_prefix': 24,
+ 'enabled': True,
+ },
+ )
+
+ assert operation == 'replace'
+ assert prepared_value[EOS_TAGGED_L3_REPLACE_FIELD] is True
+
+
+def test_extract_target_facts_from_system_state() -> None:
+ facts = _extract_target_facts_from_system_state({
+ 'openconfig-system:hostname': 'r1',
+ 'openconfig-system:software-version': '4.32.2F',
+ })
+
+ assert facts == {
+ 'hostname': 'r1',
+ 'software_version': '4.32.2F',
+ }
+
+
+def test_extract_target_facts_from_chassis_state() -> None:
+ facts = _extract_target_facts_from_chassis_state({
+ 'openconfig-platform:mfg-name': 'Arista',
+ 'openconfig-platform:part-no': 'cEOSLab',
+ 'openconfig-platform:description': 'cEOSLab',
+ 'openconfig-platform:serial-no': 'SERIAL123',
+ })
+
+ assert facts == {
+ 'vendor': 'Arista',
+ 'model': 'cEOSLab',
+ 'description': 'cEOSLab',
+ 'serial_no': 'SERIAL123',
+ }
diff --git a/src/monitoring/service/EventTools.py b/src/monitoring/service/EventTools.py
index 9472bac31e2ba6ccb3548b7088108bc5ebfcf3ee..2877b98d8c8ddaf8577f363d7e4a476ffeeda738 100644
--- a/src/monitoring/service/EventTools.py
+++ b/src/monitoring/service/EventTools.py
@@ -12,14 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import grpc, json, logging, queue, threading
+import grpc, json, logging
from typing import Dict
from common.method_wrappers.ServiceExceptions import ServiceException
from common.proto import monitoring_pb2
-from common.proto.context_pb2 import ConfigActionEnum, DeviceOperationalStatusEnum, Empty, EventTypeEnum
+from common.proto.context_pb2 import ConfigActionEnum, DeviceOperationalStatusEnum, EventTypeEnum
from common.proto.kpi_sample_types_pb2 import KpiSampleType
from common.tools.grpc.Tools import grpc_message_to_json_string
from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
from monitoring.client.MonitoringClient import MonitoringClient
from monitoring.service.MonitoringServiceServicerImpl import LOGGER
from monitoring.service.NameMapping import NameMapping
@@ -32,18 +33,23 @@ DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTAT
DEVICE_OP_STATUS_NOT_ENABLED = {DEVICE_OP_STATUS_UNDEFINED, DEVICE_OP_STATUS_DISABLED}
KPISAMPLETYPE_UNKNOWN = KpiSampleType.KPISAMPLETYPE_UNKNOWN
-class EventsDeviceCollector:
+class EventsDeviceCollector(EventsCollector):
def __init__(self, name_mapping : NameMapping) -> None: # pylint: disable=redefined-outer-name
- self._events_queue = queue.Queue()
-
self._context_client_grpc = ContextClient()
- self._device_stream = self._context_client_grpc.GetDeviceEvents(Empty())
- self._context_client = self._context_client_grpc
- self._channel = self._context_client_grpc.channel
+ self._context_client = self._context_client_grpc
+ self._channel = self._context_client_grpc.channel
+ super().__init__(
+ self._context_client,
+ activate_context_collector=False,
+ activate_topology_collector=False,
+ activate_device_collector=True,
+ activate_link_collector=False,
+ activate_service_collector=False,
+ activate_slice_collector=False,
+ activate_connection_collector=False,
+ )
self._monitoring_client = MonitoringClient(host='127.0.0.1')
- self._device_thread = threading.Thread(target=self._collect, args=(self._device_stream,), daemon=False)
-
#self._device_to_state : Dict[str, DeviceOperationalStatusEnum] = dict()
self._device_endpoint_monitored : Dict[str, Dict[str, bool]] = dict()
self._name_mapping = name_mapping
@@ -55,110 +61,87 @@ class EventsDeviceCollector:
except grpc.FutureTimeoutError:
return False
- def _collect(self, events_stream):
- try:
- for event in events_stream:
- self._events_queue.put_nowait(event)
- except grpc.RpcError as e:
- if e.code() != grpc.StatusCode.CANCELLED: # pylint: disable=no-member
- raise # pragma: no cover
-
- def start(self):
- try:
- self._device_thread.start()
- except RuntimeError:
- LOGGER.exception('Start EventTools exception')
-
- def get_event(self, block : bool = True, timeout : float = 0.1):
- return self._events_queue.get(block=block, timeout=timeout)
-
- def stop(self):
- self._device_stream.cancel()
- self._device_thread.join()
-
def listen_events(self):
try:
kpi_id_list = []
while True:
- try:
- event = self.get_event(block=True, timeout=0.5)
-
- event_type = event.event.event_type
- device_uuid = event.device_id.device_uuid.uuid
- if event_type in {EventTypeEnum.EVENTTYPE_REMOVE}:
- LOGGER.debug('Ignoring REMOVE event: {:s}'.format(grpc_message_to_json_string(event)))
- self._device_endpoint_monitored.pop(device_uuid, None)
- continue
-
- if event_type not in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}:
- LOGGER.debug('Ignoring UNKNOWN event type: {:s}'.format(grpc_message_to_json_string(event)))
- continue
-
- device = self._context_client.GetDevice(event.device_id)
- self._name_mapping.set_device_name(device_uuid, device.name)
-
- device_op_status = device.device_operational_status
- if device_op_status != DEVICE_OP_STATUS_ENABLED:
- LOGGER.debug('Ignoring Device not enabled: {:s}'.format(grpc_message_to_json_string(device)))
- continue
-
- enabled_endpoint_names = set()
- for config_rule in device.device_config.config_rules:
- if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue
- if config_rule.WhichOneof('config_rule') != 'custom': continue
- str_resource_key = str(config_rule.custom.resource_key)
- if str_resource_key.startswith('/interface[') or str_resource_key.startswith('/endpoints/endpoint['):
- json_resource_value = json.loads(config_rule.custom.resource_value)
- if 'name' not in json_resource_value: continue
- if 'enabled' in json_resource_value:
- if not json_resource_value['enabled']: continue
- enabled_endpoint_names.add(json_resource_value['name'])
- if 'oper-status' in json_resource_value:
- if str(json_resource_value['oper-status']).upper() != 'UP': continue
- enabled_endpoint_names.add(json_resource_value['name'])
-
- endpoints_monitored = self._device_endpoint_monitored.setdefault(device_uuid, dict())
- for endpoint in device.device_endpoints:
- endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
- endpoint_name_or_uuid = endpoint.name
- if endpoint_name_or_uuid is None or len(endpoint_name_or_uuid) == 0:
- endpoint_name_or_uuid = endpoint_uuid
-
- self._name_mapping.set_endpoint_name(endpoint_uuid, endpoint.name)
-
- endpoint_was_monitored = endpoints_monitored.get(endpoint_uuid, False)
- endpoint_is_enabled = (endpoint_name_or_uuid in enabled_endpoint_names)
-
- if not endpoint_was_monitored and not endpoint_is_enabled:
- # endpoint is idle, do nothing
- pass
- elif not endpoint_was_monitored and endpoint_is_enabled:
- # activate
- for value in endpoint.kpi_sample_types:
- if value == KPISAMPLETYPE_UNKNOWN: continue
-
- kpi_descriptor = monitoring_pb2.KpiDescriptor()
- kpi_descriptor.kpi_description = device.device_type
- kpi_descriptor.kpi_sample_type = value
- kpi_descriptor.device_id.CopyFrom(device.device_id) # pylint: disable=no-member
- kpi_descriptor.endpoint_id.CopyFrom(endpoint.endpoint_id) # pylint: disable=no-member
-
- kpi_id = self._monitoring_client.SetKpi(kpi_descriptor)
- kpi_id_list.append(kpi_id)
- endpoints_monitored[endpoint_uuid] = True
- else:
- MSG = 'Not implemented condition: event={:s} device={:s} endpoint={:s}' + \
- ' endpoint_was_monitored={:s} endpoint_is_enabled={:s}'
- LOGGER.warning(MSG.format(
- grpc_message_to_json_string(event), grpc_message_to_json_string(device),
- grpc_message_to_json_string(endpoint), str(endpoint_was_monitored),
- str(endpoint_is_enabled)
- ))
-
- except queue.Empty:
+ event = self.get_event(block=True, timeout=0.5)
+ if event is None:
break
+ event_type = event.event.event_type
+ device_uuid = event.device_id.device_uuid.uuid
+ if event_type in {EventTypeEnum.EVENTTYPE_REMOVE}:
+ LOGGER.debug('Ignoring REMOVE event: {:s}'.format(grpc_message_to_json_string(event)))
+ self._device_endpoint_monitored.pop(device_uuid, None)
+ continue
+
+ if event_type not in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}:
+ LOGGER.debug('Ignoring UNKNOWN event type: {:s}'.format(grpc_message_to_json_string(event)))
+ continue
+
+ device = self._context_client.GetDevice(event.device_id)
+ self._name_mapping.set_device_name(device_uuid, device.name)
+
+ device_op_status = device.device_operational_status
+ if device_op_status != DEVICE_OP_STATUS_ENABLED:
+ LOGGER.debug('Ignoring Device not enabled: {:s}'.format(grpc_message_to_json_string(device)))
+ continue
+
+ enabled_endpoint_names = set()
+ for config_rule in device.device_config.config_rules:
+ if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue
+ if config_rule.WhichOneof('config_rule') != 'custom': continue
+ str_resource_key = str(config_rule.custom.resource_key)
+ if str_resource_key.startswith('/interface[') or str_resource_key.startswith('/endpoints/endpoint['):
+ json_resource_value = json.loads(config_rule.custom.resource_value)
+ if 'name' not in json_resource_value: continue
+ if 'enabled' in json_resource_value:
+ if not json_resource_value['enabled']: continue
+ enabled_endpoint_names.add(json_resource_value['name'])
+ if 'oper-status' in json_resource_value:
+ if str(json_resource_value['oper-status']).upper() != 'UP': continue
+ enabled_endpoint_names.add(json_resource_value['name'])
+
+ endpoints_monitored = self._device_endpoint_monitored.setdefault(device_uuid, dict())
+ for endpoint in device.device_endpoints:
+ endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+ endpoint_name_or_uuid = endpoint.name
+ if endpoint_name_or_uuid is None or len(endpoint_name_or_uuid) == 0:
+ endpoint_name_or_uuid = endpoint_uuid
+
+ self._name_mapping.set_endpoint_name(endpoint_uuid, endpoint.name)
+
+ endpoint_was_monitored = endpoints_monitored.get(endpoint_uuid, False)
+ endpoint_is_enabled = (endpoint_name_or_uuid in enabled_endpoint_names)
+
+ if not endpoint_was_monitored and not endpoint_is_enabled:
+ # endpoint is idle, do nothing
+ pass
+ elif not endpoint_was_monitored and endpoint_is_enabled:
+ # activate
+ for value in endpoint.kpi_sample_types:
+ if value == KPISAMPLETYPE_UNKNOWN: continue
+
+ kpi_descriptor = monitoring_pb2.KpiDescriptor()
+ kpi_descriptor.kpi_description = device.device_type
+ kpi_descriptor.kpi_sample_type = value
+ kpi_descriptor.device_id.CopyFrom(device.device_id) # pylint: disable=no-member
+ kpi_descriptor.endpoint_id.CopyFrom(endpoint.endpoint_id) # pylint: disable=no-member
+
+ kpi_id = self._monitoring_client.SetKpi(kpi_descriptor)
+ kpi_id_list.append(kpi_id)
+ endpoints_monitored[endpoint_uuid] = True
+ else:
+ MSG = 'Not implemented condition: event={:s} device={:s} endpoint={:s}' + \
+ ' endpoint_was_monitored={:s} endpoint_is_enabled={:s}'
+ LOGGER.warning(MSG.format(
+ grpc_message_to_json_string(event), grpc_message_to_json_string(device),
+ grpc_message_to_json_string(endpoint), str(endpoint_was_monitored),
+ str(endpoint_is_enabled)
+ ))
+
return kpi_id_list
except ServiceException:
LOGGER.exception('ListenEvents exception')
diff --git a/src/monitoring/service/__main__.py b/src/monitoring/service/__main__.py
index 6d2cb40adc354806edf8fea41e80dfceb139cb09..2366e73eecb14233b4d1c600157f53b5e71d313f 100644
--- a/src/monitoring/service/__main__.py
+++ b/src/monitoring/service/__main__.py
@@ -57,7 +57,7 @@ def start_monitoring(name_mapping : NameMapping):
# Terminate is set, looping terminates
LOGGER.warning("Stopping execution...")
- events_collector.start()
+ events_collector.stop()
def main():
global LOGGER # pylint: disable=global-statement
diff --git a/src/monitoring/tests/test_event_tools_reconnect.py b/src/monitoring/tests/test_event_tools_reconnect.py
new file mode 100644
index 0000000000000000000000000000000000000000..20fcbf52bc148062a12e27a4e64bf57d1f238d47
--- /dev/null
+++ b/src/monitoring/tests/test_event_tools_reconnect.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc
+import common.tools.grpc.BaseEventCollector as base_event_collector_module
+from common.proto.context_pb2 import DeviceEvent, EventTypeEnum
+from monitoring.service import EventTools as event_tools_module
+from monitoring.service.NameMapping import NameMapping
+
+
+class _FakeRpcError(grpc.RpcError):
+ def __init__(self, status_code):
+ self._status_code = status_code
+
+ def code(self):
+ return self._status_code
+
+
+class _FakeStream:
+ def __init__(self, events=None, error=None):
+ self._events = iter(events or [])
+ self._error = error
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self._error is not None:
+ error = self._error
+ self._error = None
+ raise error
+ return next(self._events)
+
+ def cancel(self):
+ pass
+
+
+class _FakeContextClient:
+ def __init__(self):
+ self.channel = object()
+ self.calls = 0
+
+ def GetDeviceEvents(self, _request):
+ self.calls += 1
+ if self.calls == 1:
+ return _FakeStream(error=_FakeRpcError(grpc.StatusCode.UNAVAILABLE))
+ if self.calls == 2:
+ return _FakeStream(events=[_create_device_event()])
+ return _FakeStream(error=_FakeRpcError(grpc.StatusCode.CANCELLED))
+
+
+class _FakeMonitoringClient:
+ def __init__(self, host='127.0.0.1'):
+ self.host = host
+
+
+def _create_device_event():
+ event = DeviceEvent()
+ event.event.event_type = EventTypeEnum.EVENTTYPE_CREATE
+ event.event.timestamp.timestamp = 1.0
+ event.device_id.device_uuid.uuid = 'dev1'
+ return event
+
+
+def test_events_device_collector_reconnects_on_unavailable(monkeypatch):
+ fake_context_client = _FakeContextClient()
+
+ monkeypatch.setattr(event_tools_module, 'ContextClient', lambda: fake_context_client)
+ monkeypatch.setattr(event_tools_module, 'MonitoringClient', _FakeMonitoringClient)
+ monkeypatch.setattr(base_event_collector_module.time, 'sleep', lambda _seconds: None)
+
+ collector = event_tools_module.EventsDeviceCollector(NameMapping())
+ collector.start()
+ try:
+ event = collector.get_event(block=True, timeout=1.0)
+ finally:
+ collector.stop()
+
+ assert fake_context_client.calls >= 2
+ assert event.device_id.device_uuid.uuid == 'dev1'
diff --git a/src/nbi/Dockerfile b/src/nbi/Dockerfile
index 1b0d841f344bb06f871118302f565b61cc26bd1d..025120a944f63c3ab745fcfffc9a599864edffb2 100644
--- a/src/nbi/Dockerfile
+++ b/src/nbi/Dockerfile
@@ -72,7 +72,6 @@ RUN python3 -m pip install -r requirements.txt
# Add component files into working directory
WORKDIR /var/teraflow
-COPY src/nbi/. nbi/
COPY src/context/__init__.py context/__init__.py
COPY src/context/client/. context/client/
COPY src/device/__init__.py device/__init__.py
@@ -95,6 +94,7 @@ COPY src/vnt_manager/__init__.py vnt_manager/__init__.py
COPY src/vnt_manager/client/. vnt_manager/client/
RUN mkdir -p /var/teraflow/tests/tools
COPY src/tests/tools/mock_osm/. tests/tools/mock_osm/
+COPY src/nbi/. nbi/
# Start the service
# NOTE: Configured single worker to prevent issues with multi-worker synchronization. To be invetsigated.
diff --git a/src/nbi/service/NbiApplication.py b/src/nbi/service/NbiApplication.py
index ad02c754c88515f43c29745061738e36cbdd8b09..65e9c5ac3cbf740195873de942a9aa0ffec6a3a3 100644
--- a/src/nbi/service/NbiApplication.py
+++ b/src/nbi/service/NbiApplication.py
@@ -18,7 +18,7 @@ from typing import Any, List, Optional, Tuple
from flask import Flask, request
from flask_restful import Api, Resource
from flask_socketio import Namespace, SocketIO
-from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+#from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
from nbi.Config import SECRET_KEY
diff --git a/src/nbi/service/dscm_oc/routes.py b/src/nbi/service/dscm_oc/routes.py
index e0ca2766f56fe29349adfd83131b6310e66a9cd8..b597cef48665d74ecab00bafe476b530828e50c4 100644
--- a/src/nbi/service/dscm_oc/routes.py
+++ b/src/nbi/service/dscm_oc/routes.py
@@ -33,7 +33,7 @@ from pluggables.client.PluggablesClient import PluggablesClient
LOGGER = logging.getLogger(__name__)
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
blueprint = Blueprint("testconf_dscm", __name__)
diff --git a/src/nbi/service/ietf_l2vpn/Constants.py b/src/nbi/service/ietf_l2vpn/Constants.py
index e7b30ab54224ac3a1757bb6b92042c4edea4368c..10d4b867e94820cc7f1e15aa92d9c2a42031a9d7 100644
--- a/src/nbi/service/ietf_l2vpn/Constants.py
+++ b/src/nbi/service/ietf_l2vpn/Constants.py
@@ -17,75 +17,87 @@ DEFAULT_ADDRESS_FAMILIES = ['IPV4']
DEFAULT_BGP_AS = 65000
DEFAULT_BGP_ROUTE_TARGET = '{:d}:{:d}'.format(DEFAULT_BGP_AS, 333)
-# TODO: improve definition of bearer mappings
+# TODO: improve definition of bearer mappings ; should be configured through
+# Logical Resources component whenever the component is available.
# Bearer mappings:
-# device_uuid:endpoint_uuid => (
+# prefix:device_uuid:endpoint_uuid => (
# device_uuid, endpoint_uuid, router_id, route_dist, sub_if_index,
-# address_ip, address_prefix, remote_router, circuit_id)
+# address_ip, address_prefix, remote_router, circuit_id
+# )
BEARER_MAPPINGS = {
+ # OSM End-to-End Test
+ 'OSM-E2E:r1:Ethernet10': ('r1', 'Ethernet10', None, None, 0, '172.16.1.1', 24, None, None),
+ 'OSM-E2E:r3:Ethernet10': ('r3', 'Ethernet10', None, None, 0, '172.16.3.1', 24, None, None),
+ 'OSM-E2E:r1:Ethernet11': ('r1', 'Ethernet11', None, None, 0, '172.17.1.1', 24, None, None),
+ 'OSM-E2E:r3:Ethernet11': ('r3', 'Ethernet11', None, None, 0, '172.17.3.1', 24, None, None),
+
+ # SNS4SNS'26
+ 'SNS4SNS26:SiteA': ('router-1', 'Ethernet10', None, None, 0, '192.168.251.5', 24, None, None),
+ 'SNS4SNS26:SiteB': ('router-3', 'Ethernet10', None, None, 0, '192.168.252.5', 24, None, None),
+
# OFC'22
- 'R1-EMU:13/1/2': ('R1-EMU', '13/1/2', '10.10.10.1', '65000:100', 400, '3.3.2.1', 24, None, None),
- 'R2-EMU:13/1/2': ('R2-EMU', '13/1/2', '12.12.12.1', '65000:120', 450, '3.4.2.1', 24, None, None),
- 'R3-EMU:13/1/2': ('R3-EMU', '13/1/2', '20.20.20.1', '65000:200', 500, '3.3.1.1', 24, None, None),
- 'R4-EMU:13/1/2': ('R4-EMU', '13/1/2', '22.22.22.1', '65000:220', 550, '3.4.1.1', 24, None, None),
+ 'OFC22:R1-EMU:13/1/2': ('R1-EMU', '13/1/2', '10.10.10.1', '65000:100', 400, '3.3.2.1', 24, None, None),
+ 'OFC22:R2-EMU:13/1/2': ('R2-EMU', '13/1/2', '12.12.12.1', '65000:120', 450, '3.4.2.1', 24, None, None),
+ 'OFC22:R3-EMU:13/1/2': ('R3-EMU', '13/1/2', '20.20.20.1', '65000:200', 500, '3.3.1.1', 24, None, None),
+ 'OFC22:R4-EMU:13/1/2': ('R4-EMU', '13/1/2', '22.22.22.1', '65000:220', 550, '3.4.1.1', 24, None, None),
# OECC/PSC'22 - domain 1
- 'R1@D1:3/1' : ('R1@D1', '3/1', '10.0.1.1', '65001:101', 100, '1.1.3.1', 24, None, None),
- 'R1@D1:3/2' : ('R1@D1', '3/2', '10.0.1.1', '65001:101', 100, '1.1.3.2', 24, None, None),
- 'R1@D1:3/3' : ('R1@D1', '3/3', '10.0.1.1', '65001:101', 100, '1.1.3.3', 24, None, None),
- 'R2@D1:3/1' : ('R2@D1', '3/1', '10.0.1.2', '65001:102', 100, '1.2.3.1', 24, None, None),
- 'R2@D1:3/2' : ('R2@D1', '3/2', '10.0.1.2', '65001:102', 100, '1.2.3.2', 24, None, None),
- 'R2@D1:3/3' : ('R2@D1', '3/3', '10.0.1.2', '65001:102', 100, '1.2.3.3', 24, None, None),
- 'R3@D1:3/1' : ('R3@D1', '3/1', '10.0.1.3', '65001:103', 100, '1.3.3.1', 24, None, None),
- 'R3@D1:3/2' : ('R3@D1', '3/2', '10.0.1.3', '65001:103', 100, '1.3.3.2', 24, None, None),
- 'R3@D1:3/3' : ('R3@D1', '3/3', '10.0.1.3', '65001:103', 100, '1.3.3.3', 24, None, None),
- 'R4@D1:3/1' : ('R4@D1', '3/1', '10.0.1.4', '65001:104', 100, '1.4.3.1', 24, None, None),
- 'R4@D1:3/2' : ('R4@D1', '3/2', '10.0.1.4', '65001:104', 100, '1.4.3.2', 24, None, None),
- 'R4@D1:3/3' : ('R4@D1', '3/3', '10.0.1.4', '65001:104', 100, '1.4.3.3', 24, None, None),
+ 'OECCPSC22D1:R1@D1:3/1' : ('R1@D1', '3/1', '10.0.1.1', '65001:101', 100, '1.1.3.1', 24, None, None),
+ 'OECCPSC22D1:R1@D1:3/2' : ('R1@D1', '3/2', '10.0.1.1', '65001:101', 100, '1.1.3.2', 24, None, None),
+ 'OECCPSC22D1:R1@D1:3/3' : ('R1@D1', '3/3', '10.0.1.1', '65001:101', 100, '1.1.3.3', 24, None, None),
+ 'OECCPSC22D1:R2@D1:3/1' : ('R2@D1', '3/1', '10.0.1.2', '65001:102', 100, '1.2.3.1', 24, None, None),
+ 'OECCPSC22D1:R2@D1:3/2' : ('R2@D1', '3/2', '10.0.1.2', '65001:102', 100, '1.2.3.2', 24, None, None),
+ 'OECCPSC22D1:R2@D1:3/3' : ('R2@D1', '3/3', '10.0.1.2', '65001:102', 100, '1.2.3.3', 24, None, None),
+ 'OECCPSC22D1:R3@D1:3/1' : ('R3@D1', '3/1', '10.0.1.3', '65001:103', 100, '1.3.3.1', 24, None, None),
+ 'OECCPSC22D1:R3@D1:3/2' : ('R3@D1', '3/2', '10.0.1.3', '65001:103', 100, '1.3.3.2', 24, None, None),
+ 'OECCPSC22D1:R3@D1:3/3' : ('R3@D1', '3/3', '10.0.1.3', '65001:103', 100, '1.3.3.3', 24, None, None),
+ 'OECCPSC22D1:R4@D1:3/1' : ('R4@D1', '3/1', '10.0.1.4', '65001:104', 100, '1.4.3.1', 24, None, None),
+ 'OECCPSC22D1:R4@D1:3/2' : ('R4@D1', '3/2', '10.0.1.4', '65001:104', 100, '1.4.3.2', 24, None, None),
+ 'OECCPSC22D1:R4@D1:3/3' : ('R4@D1', '3/3', '10.0.1.4', '65001:104', 100, '1.4.3.3', 24, None, None),
# OECC/PSC'22 - domain 2
- 'R1@D2:3/1' : ('R1@D2', '3/1', '10.0.2.1', '65002:101', 100, '2.1.3.1', 24, None, None),
- 'R1@D2:3/2' : ('R1@D2', '3/2', '10.0.2.1', '65002:101', 100, '2.1.3.2', 24, None, None),
- 'R1@D2:3/3' : ('R1@D2', '3/3', '10.0.2.1', '65002:101', 100, '2.1.3.3', 24, None, None),
- 'R2@D2:3/1' : ('R2@D2', '3/1', '10.0.2.2', '65002:102', 100, '2.2.3.1', 24, None, None),
- 'R2@D2:3/2' : ('R2@D2', '3/2', '10.0.2.2', '65002:102', 100, '2.2.3.2', 24, None, None),
- 'R2@D2:3/3' : ('R2@D2', '3/3', '10.0.2.2', '65002:102', 100, '2.2.3.3', 24, None, None),
- 'R3@D2:3/1' : ('R3@D2', '3/1', '10.0.2.3', '65002:103', 100, '2.3.3.1', 24, None, None),
- 'R3@D2:3/2' : ('R3@D2', '3/2', '10.0.2.3', '65002:103', 100, '2.3.3.2', 24, None, None),
- 'R3@D2:3/3' : ('R3@D2', '3/3', '10.0.2.3', '65002:103', 100, '2.3.3.3', 24, None, None),
- 'R4@D2:3/1' : ('R4@D2', '3/1', '10.0.2.4', '65002:104', 100, '2.4.3.1', 24, None, None),
- 'R4@D2:3/2' : ('R4@D2', '3/2', '10.0.2.4', '65002:104', 100, '2.4.3.2', 24, None, None),
- 'R4@D2:3/3' : ('R4@D2', '3/3', '10.0.2.4', '65002:104', 100, '2.4.3.3', 24, None, None),
+ 'OECCPSC22D2:R1@D2:3/1' : ('R1@D2', '3/1', '10.0.2.1', '65002:101', 100, '2.1.3.1', 24, None, None),
+ 'OECCPSC22D2:R1@D2:3/2' : ('R1@D2', '3/2', '10.0.2.1', '65002:101', 100, '2.1.3.2', 24, None, None),
+ 'OECCPSC22D2:R1@D2:3/3' : ('R1@D2', '3/3', '10.0.2.1', '65002:101', 100, '2.1.3.3', 24, None, None),
+ 'OECCPSC22D2:R2@D2:3/1' : ('R2@D2', '3/1', '10.0.2.2', '65002:102', 100, '2.2.3.1', 24, None, None),
+ 'OECCPSC22D2:R2@D2:3/2' : ('R2@D2', '3/2', '10.0.2.2', '65002:102', 100, '2.2.3.2', 24, None, None),
+ 'OECCPSC22D2:R2@D2:3/3' : ('R2@D2', '3/3', '10.0.2.2', '65002:102', 100, '2.2.3.3', 24, None, None),
+ 'OECCPSC22D2:R3@D2:3/1' : ('R3@D2', '3/1', '10.0.2.3', '65002:103', 100, '2.3.3.1', 24, None, None),
+ 'OECCPSC22D2:R3@D2:3/2' : ('R3@D2', '3/2', '10.0.2.3', '65002:103', 100, '2.3.3.2', 24, None, None),
+ 'OECCPSC22D2:R3@D2:3/3' : ('R3@D2', '3/3', '10.0.2.3', '65002:103', 100, '2.3.3.3', 24, None, None),
+ 'OECCPSC22D2:R4@D2:3/1' : ('R4@D2', '3/1', '10.0.2.4', '65002:104', 100, '2.4.3.1', 24, None, None),
+ 'OECCPSC22D2:R4@D2:3/2' : ('R4@D2', '3/2', '10.0.2.4', '65002:104', 100, '2.4.3.2', 24, None, None),
+ 'OECCPSC22D2:R4@D2:3/3' : ('R4@D2', '3/3', '10.0.2.4', '65002:104', 100, '2.4.3.3', 24, None, None),
# ECOC'22
- 'DC1-GW:CS1-GW1': ('CS1-GW1', '10/1', '5.5.1.1', None, 0, None, None, '5.5.2.1', 111),
- 'DC1-GW:CS1-GW2': ('CS1-GW2', '10/1', '5.5.1.2', None, 0, None, None, '5.5.2.2', 222),
- 'DC2-GW:CS2-GW1': ('CS2-GW1', '10/1', '5.5.2.1', None, 0, None, None, '5.5.1.1', 111),
- 'DC2-GW:CS2-GW2': ('CS2-GW2', '10/1', '5.5.2.2', None, 0, None, None, '5.5.1.2', 222),
+ 'ECOC22:DC1-GW:CS1-GW1': ('CS1-GW1', '10/1', '5.5.1.1', None, 0, None, None, '5.5.2.1', 111),
+ 'ECOC22:DC1-GW:CS1-GW2': ('CS1-GW2', '10/1', '5.5.1.2', None, 0, None, None, '5.5.2.2', 222),
+ 'ECOC22:DC2-GW:CS2-GW1': ('CS2-GW1', '10/1', '5.5.2.1', None, 0, None, None, '5.5.1.1', 111),
+ 'ECOC22:DC2-GW:CS2-GW2': ('CS2-GW2', '10/1', '5.5.2.2', None, 0, None, None, '5.5.1.2', 222),
# NetworkX'22
- 'R1:1/2': ('R1', '1/2', '5.1.1.2', None, 0, None, None, None, None),
- 'R1:1/3': ('R1', '1/3', '5.1.1.3', None, 0, None, None, None, None),
- 'R2:1/2': ('R2', '1/2', '5.2.1.2', None, 0, None, None, None, None),
- 'R2:1/3': ('R2', '1/3', '5.2.1.3', None, 0, None, None, None, None),
- 'R3:1/2': ('R3', '1/2', '5.3.1.2', None, 0, None, None, None, None),
- 'R3:1/3': ('R3', '1/3', '5.3.1.3', None, 0, None, None, None, None),
- 'R4:1/2': ('R4', '1/2', '5.4.1.2', None, 0, None, None, None, None),
- 'R4:1/3': ('R4', '1/3', '5.4.1.3', None, 0, None, None, None, None),
+ 'NETX22:R1:1/2': ('R1', '1/2', '5.1.1.2', None, 0, None, None, None, None),
+ 'NETX22:R1:1/3': ('R1', '1/3', '5.1.1.3', None, 0, None, None, None, None),
+ 'NETX22:R2:1/2': ('R2', '1/2', '5.2.1.2', None, 0, None, None, None, None),
+ 'NETX22:R2:1/3': ('R2', '1/3', '5.2.1.3', None, 0, None, None, None, None),
+ 'NETX22:R3:1/2': ('R3', '1/2', '5.3.1.2', None, 0, None, None, None, None),
+ 'NETX22:R3:1/3': ('R3', '1/3', '5.3.1.3', None, 0, None, None, None, None),
+ 'NETX22:R4:1/2': ('R4', '1/2', '5.4.1.2', None, 0, None, None, None, None),
+ 'NETX22:R4:1/3': ('R4', '1/3', '5.4.1.3', None, 0, None, None, None, None),
# OFC'23
- 'PE1:1/1': ('PE1', '1/1', '10.1.1.1', None, 0, None, None, None, None),
- 'PE1:1/2': ('PE1', '1/2', '10.1.1.2', None, 0, None, None, None, None),
- 'PE2:1/1': ('PE2', '1/1', '10.2.1.1', None, 0, None, None, None, None),
- 'PE2:1/2': ('PE2', '1/2', '10.2.1.2', None, 0, None, None, None, None),
- 'PE3:1/1': ('PE3', '1/1', '10.3.1.1', None, 0, None, None, None, None),
- 'PE3:1/2': ('PE3', '1/2', '10.3.1.2', None, 0, None, None, None, None),
- 'PE4:1/1': ('PE4', '1/1', '10.4.1.1', None, 0, None, None, None, None),
- 'PE4:1/2': ('PE4', '1/2', '10.4.1.2', None, 0, None, None, None, None),
+ 'OFC23:PE1:1/1': ('PE1', '1/1', '10.1.1.1', None, 0, None, None, None, None),
+ 'OFC23:PE1:1/2': ('PE1', '1/2', '10.1.1.2', None, 0, None, None, None, None),
+ 'OFC23:PE2:1/1': ('PE2', '1/1', '10.2.1.1', None, 0, None, None, None, None),
+ 'OFC23:PE2:1/2': ('PE2', '1/2', '10.2.1.2', None, 0, None, None, None, None),
+ 'OFC23:PE3:1/1': ('PE3', '1/1', '10.3.1.1', None, 0, None, None, None, None),
+ 'OFC23:PE3:1/2': ('PE3', '1/2', '10.3.1.2', None, 0, None, None, None, None),
+ 'OFC23:PE4:1/1': ('PE4', '1/1', '10.4.1.1', None, 0, None, None, None, None),
+ 'OFC23:PE4:1/2': ('PE4', '1/2', '10.4.1.2', None, 0, None, None, None, None),
- 'R149:eth-1/0/22': ('R149', 'eth-1/0/22', '5.5.5.5', None, 0, None, None, '5.5.5.1', '100'),
- 'R155:eth-1/0/22': ('R155', 'eth-1/0/22', '5.5.5.1', None, 0, None, None, '5.5.5.5', '100'),
- 'R199:eth-1/0/21': ('R199', 'eth-1/0/21', '5.5.5.6', None, 0, None, None, '5.5.5.5', '100'),
+ 'OFC23:R149:eth-1/0/22': ('R149', 'eth-1/0/22', '5.5.5.5', None, 0, None, None, '5.5.5.1', '100'),
+ 'OFC23:R155:eth-1/0/22': ('R155', 'eth-1/0/22', '5.5.5.1', None, 0, None, None, '5.5.5.5', '100'),
+ 'OFC23:R199:eth-1/0/21': ('R199', 'eth-1/0/21', '5.5.5.6', None, 0, None, None, '5.5.5.5', '100'),
}
diff --git a/src/nbi/service/ietf_l2vpn/Handlers.py b/src/nbi/service/ietf_l2vpn/Handlers.py
index 48b2eff147c6afb6f905c19aa57bad0a26f275a2..5941cac08488c34155b7fb1fe4cc335982942f18 100644
--- a/src/nbi/service/ietf_l2vpn/Handlers.py
+++ b/src/nbi/service/ietf_l2vpn/Handlers.py
@@ -86,7 +86,17 @@ def process_site_network_access(
bearer_mapping = BEARER_MAPPINGS.get(bearer_reference)
if bearer_mapping is None:
if ':' in bearer_reference:
- bearer_mapping = str(bearer_reference).split(':', maxsplit=1)
+ bearer_mapping = str(bearer_reference).split(':')
+ if len(bearer_mapping) == 2:
+ # assume device:endpoint
+ pass
+ elif len(bearer_mapping) == 3:
+ # assume prefix:device:endpoint
+ bearer_mapping.pop(0)
+ else:
+ MSG = 'Bearer({:s}) not found; unable to auto-generated mapping'
+ raise Exception(MSG.format(str(bearer_reference)))
+
bearer_mapping.extend([None, None, None, None, None, None, None])
bearer_mapping = tuple(bearer_mapping)
MSG = 'Bearer({:s}) not found; auto-generated mapping: {:s}'
@@ -103,12 +113,34 @@ def process_site_network_access(
service_uuid = network_access['vpn-attachment']['vpn-id']
network_access_connection = network_access['connection']
- encapsulation_type = network_access_connection['encapsulation-type']
+
+ encapsulation_type = network_access_connection.get('encapsulation-type', 'ietf-l2vpn-svc:ethernet')
encapsulation_type = encapsulation_type.replace('ietf-l2vpn-svc:', '')
- if encapsulation_type != 'vlan':
- encapsulation_type = network_access_connection['encapsulation-type']
- MSG = 'EncapsulationType({:s}) not supported'
- raise NotImplementedError(MSG.format(str(encapsulation_type)))
+
+ eth_inf_type = None
+ if 'eth-inf-type' not in network_access_connection:
+ if 'tagged-interface' in network_access_connection:
+ if encapsulation_type == 'ethernet':
+ eth_inf_type = 'untagged'
+ elif encapsulation_type == 'vlan':
+ eth_inf_type = 'tagged'
+ if eth_inf_type is None:
+ eth_inf_type = network_access_connection.get('eth-inf-type', 'ietf-l2vpn-svc:untagged')
+ eth_inf_type = eth_inf_type.replace('ietf-l2vpn-svc:', '')
+
+ if encapsulation_type == 'ethernet' and eth_inf_type == 'untagged':
+ if 'tagged-interface' in network_access_connection:
+ MSG = 'Malformed NetworkAccessConnection({:s})'
+ raise Exception(MSG.format(str(network_access_connection)))
+ elif encapsulation_type == 'vlan' and eth_inf_type == 'tagged':
+ if 'tagged-interface' not in network_access_connection:
+ MSG = 'Malformed NetworkAccessConnection({:s})'
+ raise Exception(MSG.format(str(network_access_connection)))
+ else:
+ encapsulation_type = network_access_connection.get('encapsulation-type', 'ietf-l2vpn-svc:ethernet')
+ eth_inf_type = network_access_connection.get('eth-inf-type', 'ietf-l2vpn-svc:untagged')
+ MSG = 'Combination EncapsulationType({:s})/EthernetInterfaceType({:s}) not supported'
+ raise NotImplementedError(MSG.format(str(encapsulation_type), str(eth_inf_type)))
cvlan_tag_id = None
if 'tagged-interface' in network_access_connection:
diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
index e11beb62c9cd406bda957f3738b387ab5321b0b6..caf868e71e395030d5ec51c4088102c5555f3d4f 100644
--- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
+++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py
@@ -25,6 +25,7 @@ from nbi.service._tools.Authentication import HTTP_AUTH
from nbi.service._tools.HttpStatusCodes import (
HTTP_CREATED, HTTP_NOCONTENT, HTTP_SERVERERROR
)
+from .Constants import DEFAULT_MTU
from .Handlers import process_site_network_access
from .YangValidator import YangValidator
@@ -113,8 +114,12 @@ class L2VPN_SiteNetworkAccesses(Resource):
if 'encapsulation-type' in connection:
if connection['encapsulation-type'] == 'dot1q-vlan-tagged':
connection['encapsulation-type'] = 'vlan'
+ if 'eth-inf-type' not in connection:
+ connection['eth-inf-type'] = 'tagged'
else:
connection['encapsulation-type'] = 'ethernet'
+ if 'eth-inf-type' not in connection:
+ connection['eth-inf-type'] = 'untagged'
if 'tagged-interface' in connection:
tagged_interface = connection['tagged-interface']
if 'dot1q-vlan-tagged' in tagged_interface:
@@ -131,7 +136,7 @@ class L2VPN_SiteNetworkAccesses(Resource):
if 'service' not in site_network_access:
site_network_access['service'] = dict()
if 'svc-mtu' not in site_network_access['service']:
- site_network_access['service']['svc-mtu'] = 1500
+ site_network_access['service']['svc-mtu'] = DEFAULT_MTU
context_client = ContextClient()
vpn_services = list()
@@ -180,7 +185,7 @@ class L2VPN_SiteNetworkAccesses(Resource):
}}
MSG = '[_prepare_request_payload] request_data={:s}'
- LOGGER.warning(MSG.format(str(request_data)))
+ LOGGER.debug(MSG.format(str(request_data)))
return request_data
errors.append('Unexpected request: {:s}'.format(str(request_data)))
diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py
index 6857bce610ce63248e3f9c25d4bc4c1c853ca358..00bcff93f685db230da0e94330bfb2aa6251273d 100644
--- a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py
+++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py
@@ -30,18 +30,40 @@ RE_IF = re.compile(r'^\/interface\[([^\]]+)\]$')
RE_SUBIF = re.compile(r'^\/interface\[([^\]]+)\]\/subinterface\[([^\]]+)\]$')
RE_SR = re.compile(r'^\/network_instance\[([^\]]+)\]\/protocols\[STATIC\]/route\[([^\:]+)\:([^\]]+)\]$')
+def _safe_int(value: Optional[object]) -> Optional[int]:
+ try:
+ return int(value) if value is not None else None
+ except (TypeError, ValueError):
+ return None
+
+def _safe_bool(value: Optional[object]) -> Optional[bool]:
+ if value is None:
+ return None
+ if isinstance(value, bool):
+ return value
+ if isinstance(value, (int, float)):
+ return bool(value)
+ if isinstance(value, str):
+ lowered = value.strip().lower()
+ if lowered in {'true', '1', 'yes', 'y', 'on', 'tagged'}:
+ return True
+ if lowered in {'false', '0', 'no', 'n', 'off', 'untagged'}:
+ return False
+ return None
+
def _interface(
interface : str, if_type : Optional[str] = 'l3ipvlan', index : int = 0, vlan_id : Optional[int] = None,
address_ip : Optional[str] = None, address_prefix : Optional[int] = None, mtu : Optional[int] = None,
- enabled : bool = True
+ enabled : Optional[bool] = True
) -> Tuple[str, Dict]:
path = '/interface[{:s}]/subinterface[{:d}]'.format(interface, index)
- data = {'name': interface, 'type': if_type, 'index': index, 'enabled': enabled}
- if if_type is not None: data['type'] = if_type
- if vlan_id is not None: data['vlan_id'] = vlan_id
- if address_ip is not None: data['address_ip'] = address_ip
+ data = {'name': interface, 'type': if_type, 'index': index}
+ if if_type is not None: data['type' ] = if_type
+ if vlan_id is not None: data['vlan_id' ] = vlan_id
+ if address_ip is not None: data['address_ip' ] = address_ip
if address_prefix is not None: data['address_prefix'] = address_prefix
- if mtu is not None: data['mtu'] = mtu
+ if mtu is not None: data['mtu' ] = mtu
+ if enabled is not None: data['enabled' ] = enabled
return path, data
def _network_instance(ni_name : str, ni_type : str) -> Tuple[str, Dict]:
@@ -49,6 +71,11 @@ def _network_instance(ni_name : str, ni_type : str) -> Tuple[str, Dict]:
data = {'name': ni_name, 'type': ni_type}
return path, data
+def _network_instance_vlan(ni_name : str, vlan_id : int, vlan_name : str = None) -> Tuple[str, Dict]:
+ path = '/network_instance[{:s}]/vlan[{:s}]'.format(ni_name, str(vlan_id))
+ data = {'name': ni_name, 'vlan_id': vlan_id, 'vlan_name': vlan_name}
+ return path, data
+
def _network_instance_protocol(ni_name : str, protocol : str) -> Tuple[str, Dict]:
path = '/network_instance[{:s}]/protocols[{:s}]'.format(ni_name, protocol)
data = {'name': ni_name, 'identifier': protocol, 'protocol_name': protocol}
@@ -75,6 +102,7 @@ def _network_instance_interface(ni_name : str, interface : str, sub_interface_in
data = {'name': ni_name, 'id': sub_interface_name, 'interface': interface, 'subinterface': sub_interface_index}
return path, data
+
class EndpointComposer:
def __init__(self, endpoint_uuid : str) -> None:
self.uuid = endpoint_uuid
@@ -82,12 +110,32 @@ class EndpointComposer:
self.sub_interface_index = 0
self.ipv4_address = None
self.ipv4_prefix_len = None
+ self.explicit_vlan_ids : Set[int] = set()
+ self.force_trunk = False
+ self.mtu : Optional[int] = None
+
+ def _add_vlan_id(self, vlan_id : Optional[int]) -> None:
+ if vlan_id is not None:
+ self.explicit_vlan_ids.add(vlan_id)
+
+ def _configure_from_settings(self, json_settings : Dict) -> None:
+ if not isinstance(json_settings, dict):
+ return
+ vlan_id = _safe_int(json_settings.get('vlan_id', json_settings.get('vlan-id')))
+ self._add_vlan_id(vlan_id)
+ mtu = _safe_int(json_settings.get('mtu'))
+ if mtu is not None and mtu > 0:
+ self.mtu = mtu
def configure(self, endpoint_obj : Optional[EndPoint], settings : Optional[TreeNode]) -> None:
if endpoint_obj is not None:
self.objekt = endpoint_obj
if settings is None: return
- json_settings : Dict = settings.value
+ json_settings : Dict = settings.value or dict()
+ self._configure_from_settings(json_settings)
+ for child in settings.children:
+ if isinstance(child.value, dict):
+ self._configure_from_settings(child.value)
if 'address_ip' in json_settings:
self.ipv4_address = json_settings['address_ip']
@@ -107,38 +155,61 @@ class EndpointComposer:
self.sub_interface_index = json_settings.get('index', 0)
- def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]:
+ def set_force_trunk(self, enable : bool = True) -> None:
+ self.force_trunk = enable
+
+ def _select_vlan_id(self, service_vlan_id : Optional[int]) -> Optional[int]:
+ if service_vlan_id is not None and service_vlan_id in self.explicit_vlan_ids:
+ return service_vlan_id
+ if len(self.explicit_vlan_ids) > 0:
+ return sorted(self.explicit_vlan_ids)[0]
+ return service_vlan_id
+
+ def get_config_rules(
+ self, network_instance_name : str, service_vlan_id : Optional[int] = None,
+ access_vlan_tagged : bool = False, delete : bool = False
+ ) -> List[Dict]:
if self.ipv4_address is None: return []
if self.ipv4_prefix_len is None: return []
json_config_rule = json_config_rule_delete if delete else json_config_rule_set
config_rules : List[Dict] = list()
+ vlan_id = None
+ if self.force_trunk or access_vlan_tagged or len(self.explicit_vlan_ids) > 0:
+ vlan_id = self._select_vlan_id(service_vlan_id)
+ if vlan_id is None:
+ LOGGER.warning('VLAN tagging requested but no vlan_id provided for endpoint={:s}'.format(self.uuid))
+ sub_interface_index = self.sub_interface_index
+
if network_instance_name != DEFAULT_NETWORK_INSTANCE:
config_rules.append(json_config_rule(*_network_instance_interface(
- network_instance_name, self.objekt.name, self.sub_interface_index
+ network_instance_name, self.objekt.name, sub_interface_index
)))
if delete:
config_rules.extend([
json_config_rule(*_interface(
- self.objekt.name, index=self.sub_interface_index, address_ip=None,
- address_prefix=None, enabled=False
+ self.objekt.name, index=sub_interface_index, address_ip=None,
+ address_prefix=None, enabled=None, vlan_id=vlan_id, mtu=None
)),
])
else:
config_rules.extend([
json_config_rule(*_interface(
- self.objekt.name, index=self.sub_interface_index, address_ip=self.ipv4_address,
- address_prefix=self.ipv4_prefix_len, enabled=True
+ self.objekt.name, index=sub_interface_index, address_ip=self.ipv4_address,
+ address_prefix=self.ipv4_prefix_len, enabled=True, vlan_id=vlan_id, mtu=self.mtu
)),
])
return config_rules
def dump(self) -> Dict:
return {
- 'index' : self.sub_interface_index,
- 'address_ip' : self.ipv4_address,
- 'address_prefix': self.ipv4_prefix_len,
+ 'index' : self.sub_interface_index,
+ 'address_ip' : self.ipv4_address,
+ 'address_prefix' : self.ipv4_prefix_len,
+ 'explicit_vlan_ids' : list(self.explicit_vlan_ids),
+ 'force_trunk' : self.force_trunk,
+ 'mtu' : self.mtu,
}
def __str__(self):
@@ -155,6 +226,10 @@ class DeviceComposer:
self.endpoints : Dict[str, EndpointComposer] = dict() # endpoint_uuid => EndpointComposer
self.connected : Set[str] = set()
self.static_routes : Dict[str, Dict[int, str]] = dict() # {prefix => {metric => next_hop}}
+ self.service_vlan_id : Optional[int] = None
+ self.access_vlan_tagged = False
+ self.vlan_ids : Set[int] = set()
+ self.interface_mtu : Dict[str, int] = dict()
def set_endpoint_alias(self, endpoint_name : str, endpoint_uuid : str) -> None:
self.aliases[endpoint_name] = endpoint_uuid
@@ -167,6 +242,7 @@ class DeviceComposer:
def configure(self, device_obj : Device, settings : Optional[TreeNode]) -> None:
self.objekt = device_obj
+ self.interface_mtu = dict()
for endpoint_obj in device_obj.device_endpoints:
endpoint_uuid = endpoint_obj.endpoint_id.endpoint_uuid.uuid
self.set_endpoint_alias(endpoint_obj.name, endpoint_uuid)
@@ -184,6 +260,8 @@ class DeviceComposer:
resource_value = json.loads(config_rule_custom.resource_value)
management = resource_value.get('management', False)
if management: mgmt_ifaces.add(if_name)
+ mtu = _safe_int(resource_value.get('mtu'))
+ if mtu is not None: self.interface_mtu[if_name] = mtu
# Find data plane interfaces
for config_rule in device_obj.device_config.config_rules:
@@ -216,6 +294,13 @@ class DeviceComposer:
next_hop = resource_value['next_hop']
self.static_routes.setdefault(prefix, dict())[metric] = next_hop
+ for if_name, mtu in self.interface_mtu.items():
+ if if_name in mgmt_ifaces: continue
+ if if_name not in self.aliases: continue
+ endpoint = self.get_endpoint(if_name)
+ if endpoint.mtu is None:
+ endpoint.mtu = mtu
+
if settings is None: return
json_settings : Dict = settings.value
static_routes : List[Dict] = json_settings.get('static_routes', [])
@@ -225,7 +310,15 @@ class DeviceComposer:
metric = static_route.get('metric', 0)
self.static_routes.setdefault(prefix, dict())[metric] = next_hop
- def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]:
+ def get_config_rules(
+ self, network_instance_name : str, service_vlan_id : Optional[int] = None,
+ access_vlan_tagged : bool = False, delete : bool = False
+ ) -> List[Dict]:
+ self.service_vlan_id = service_vlan_id
+ self.access_vlan_tagged = access_vlan_tagged
+ self.vlan_ids = set()
+ if self.service_vlan_id is not None:
+ self.vlan_ids.add(self.service_vlan_id)
SELECTED_DEVICES = {
DeviceTypeEnum.PACKET_POP.value,
DeviceTypeEnum.PACKET_ROUTER.value,
@@ -237,9 +330,22 @@ class DeviceComposer:
config_rules : List[Dict] = list()
if network_instance_name != DEFAULT_NETWORK_INSTANCE:
json_config_rule(*_network_instance(network_instance_name, 'L3VRF'))
+
for endpoint in self.endpoints.values():
- config_rules.extend(endpoint.get_config_rules(network_instance_name, delete=delete))
- if len(self.static_routes) > 0:
+ if endpoint.objekt is None:
+ continue
+ config_rules.extend(endpoint.get_config_rules(
+ network_instance_name, self.service_vlan_id,
+ access_vlan_tagged=self.access_vlan_tagged, delete=delete
+ ))
+ self.vlan_ids.update(endpoint.explicit_vlan_ids)
+
+ for vlan_id in sorted(self.vlan_ids):
+ vlan_name = 'tfs-vlan-{:s}'.format(str(vlan_id))
+ config_rules.append(json_config_rule(*_network_instance_vlan(
+ network_instance_name, vlan_id, vlan_name=vlan_name
+ )))
+ if len(self.static_routes) > 0 and not delete:
config_rules.append(
json_config_rule(*_network_instance_protocol_static(network_instance_name))
)
@@ -274,6 +380,15 @@ class ConfigRuleComposer:
self.objekt : Optional[Service] = None
self.aliases : Dict[str, str] = dict() # device_name => device_uuid
self.devices : Dict[str, DeviceComposer] = dict() # device_uuid => DeviceComposer
+ self.vlan_id : Optional[int] = None
+ self.access_vlan_tagged = False
+
+ def clean(self) -> None:
+ self.objekt : Optional[Service] = None
+ self.aliases : Dict[str, str] = dict() # device_name => device_uuid
+ self.devices : Dict[str, DeviceComposer] = dict() # device_uuid => DeviceComposer
+ self.vlan_id : Optional[int] = None
+ self.access_vlan_tagged = False
def set_device_alias(self, device_name : str, device_uuid : str) -> None:
self.aliases[device_name] = device_uuid
@@ -286,15 +401,34 @@ class ConfigRuleComposer:
def configure(self, service_obj : Service, settings : Optional[TreeNode]) -> None:
self.objekt = service_obj
+ self.vlan_id = None
+ self.access_vlan_tagged = False
if settings is None: return
- #json_settings : Dict = settings.value
- # For future use
+ json_settings : Dict = settings.value or dict()
+
+ if 'vlan_id' in json_settings:
+ self.vlan_id = _safe_int(json_settings['vlan_id'])
+ elif 'vlan-id' in json_settings:
+ self.vlan_id = _safe_int(json_settings['vlan-id'])
+
+ if 'access_vlan_tagged' in json_settings or 'access-vlan-tagged' in json_settings:
+ access_vlan_tagged = json_settings.get('access_vlan_tagged', json_settings.get('access-vlan-tagged'))
+ parsed = _safe_bool(access_vlan_tagged)
+ if parsed is None:
+ MSG = 'Invalid access_vlan_tagged value in service settings: {:s}'
+ LOGGER.warning(MSG.format(str(access_vlan_tagged)))
+ self.access_vlan_tagged = False
+ else:
+ self.access_vlan_tagged = parsed
def get_config_rules(
self, network_instance_name : str = NETWORK_INSTANCE, delete : bool = False
) -> Dict[str, List[Dict]]:
return {
- device_uuid : device.get_config_rules(network_instance_name, delete=delete)
+ device_uuid : device.get_config_rules(
+ network_instance_name, self.vlan_id,
+ access_vlan_tagged=self.access_vlan_tagged, delete=delete
+ )
for device_uuid, device in self.devices.items()
}
@@ -303,5 +437,7 @@ class ConfigRuleComposer:
'devices' : {
device_uuid : device.dump()
for device_uuid, device in self.devices.items()
- }
+ },
+ 'vlan_id': self.vlan_id,
+ 'access_vlan_tagged': self.access_vlan_tagged,
}
diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py
index 4517327e17797c933fe1993d9435515ede45ff97..c0c1b950279dffa4ae521cb45573202a3657ebf9 100644
--- a/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py
+++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/L3NMGnmiOpenConfigServiceHandler.py
@@ -26,6 +26,7 @@ from service.service.task_scheduler.TaskExecutor import TaskExecutor
from service.service.tools.EndpointIdFormatters import endpointids_to_raw
from .ConfigRuleComposer import ConfigRuleComposer
from .StaticRouteGenerator import StaticRouteGenerator
+from .VlanIdPropagator import VlanIdPropagator
LOGGER = logging.getLogger(__name__)
@@ -39,10 +40,13 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
self.__task_executor = task_executor
self.__settings_handler = SettingsHandler(service.service_config, **settings)
self.__config_rule_composer = ConfigRuleComposer()
+ self.__vlan_id_propagator = VlanIdPropagator(self.__config_rule_composer)
self.__static_route_generator = StaticRouteGenerator(self.__config_rule_composer)
self.__endpoint_map : Dict[Tuple[str, str], Tuple[str, str]] = dict()
def _compose_config_rules(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> None:
+ self.__config_rule_composer.clean()
+
if len(endpoints) % 2 != 0: raise Exception('Number of endpoints should be even')
service_settings = self.__settings_handler.get_service_settings()
@@ -65,9 +69,18 @@ class L3NMGnmiOpenConfigServiceHandler(_ServiceHandler):
self.__endpoint_map[(device_uuid, endpoint_uuid)] = (device_obj.name, endpoint_obj.name)
- LOGGER.debug('[pre] config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
+ MSG = '[pre] config_rule_composer = {:s}'
+ LOGGER.debug(MSG.format(json.dumps(self.__config_rule_composer.dump())))
+
+ self.__vlan_id_propagator.compose(endpoints)
+
+ MSG = '[post-vlan] config_rule_composer = {:s}'
+ LOGGER.debug(MSG.format(json.dumps(self.__config_rule_composer.dump())))
+
self.__static_route_generator.compose(endpoints)
- LOGGER.debug('[post] config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
+
+ MSG = '[post] config_rule_composer = {:s}'
+ LOGGER.debug(MSG.format(json.dumps(self.__config_rule_composer.dump())))
def _do_configurations(
self, config_rules_per_device : Dict[str, List[Dict]], endpoints : List[Tuple[str, str, Optional[str]]],
diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/VlanIdPropagator.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/VlanIdPropagator.py
new file mode 100644
index 0000000000000000000000000000000000000000..88fdb20f1d611901eda5e98fa58f984390e206bb
--- /dev/null
+++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/VlanIdPropagator.py
@@ -0,0 +1,89 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging
+from typing import List, Optional, Tuple
+from common.DeviceTypes import DeviceTypeEnum
+from .ConfigRuleComposer import ConfigRuleComposer, DeviceComposer
+
+LOGGER = logging.getLogger(__name__)
+
+ROUTER_TYPES = {
+ DeviceTypeEnum.PACKET_ROUTER.value,
+ DeviceTypeEnum.EMULATED_PACKET_ROUTER.value,
+ DeviceTypeEnum.PACKET_POP.value,
+ DeviceTypeEnum.PACKET_RADIO_ROUTER.value,
+ DeviceTypeEnum.EMULATED_PACKET_RADIO_ROUTER.value,
+}
+
+def _is_router_device(device : DeviceComposer) -> bool:
+ return device.objekt is not None and device.objekt.device_type in ROUTER_TYPES
+
+
+class VlanIdPropagator:
+ def __init__(self, config_rule_composer : ConfigRuleComposer) -> None:
+ self._config_rule_composer = config_rule_composer
+
+ def compose(self, connection_hop_list : List[Tuple[str, str, Optional[str]]]) -> None:
+ link_endpoints = self._compute_link_endpoints(connection_hop_list)
+ LOGGER.debug('link_endpoints = {:s}'.format(str(link_endpoints)))
+
+ self._propagate_vlan_id(link_endpoints)
+ LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump())))
+
+ def _compute_link_endpoints(
+ self, connection_hop_list : List[Tuple[str, str, Optional[str]]]
+ ) -> List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]]:
+ # In some cases connection_hop_list might contain repeated endpoints; remove them here.
+ added_connection_hops = set()
+ filtered_connection_hop_list = list()
+ for connection_hop in connection_hop_list:
+ if connection_hop in added_connection_hops: continue
+ filtered_connection_hop_list.append(connection_hop)
+ added_connection_hops.add(connection_hop)
+ connection_hop_list = filtered_connection_hop_list
+
+ # In some cases connection_hop_list first and last items might be internal endpoints of
+ # devices instead of link endpoints. Filter those endpoints not reaching a new device.
+ if len(connection_hop_list) > 2 and connection_hop_list[0][0] == connection_hop_list[1][0]:
+ # same device on first 2 endpoints
+ connection_hop_list = connection_hop_list[1:]
+ if len(connection_hop_list) > 2 and connection_hop_list[-1][0] == connection_hop_list[-2][0]:
+ # same device on last 2 endpoints
+ connection_hop_list = connection_hop_list[:-1]
+
+ num_connection_hops = len(connection_hop_list)
+ if num_connection_hops % 2 != 0: raise Exception('Number of connection hops must be even')
+ if num_connection_hops < 4: raise Exception('Number of connection hops must be >= 4')
+
+ it_connection_hops = iter(connection_hop_list)
+ return list(zip(it_connection_hops, it_connection_hops))
+
+ def _propagate_vlan_id(
+ self, link_endpoints_list : List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]]
+ ) -> None:
+ for link_endpoints in link_endpoints_list:
+ device_endpoint_a, device_endpoint_b = link_endpoints
+
+ device_uuid_a, endpoint_uuid_a = device_endpoint_a[0:2]
+ device_a = self._config_rule_composer.get_device(device_uuid_a)
+ endpoint_a = device_a.get_endpoint(endpoint_uuid_a)
+
+ device_uuid_b, endpoint_uuid_b = device_endpoint_b[0:2]
+ device_b = self._config_rule_composer.get_device(device_uuid_b)
+ endpoint_b = device_b.get_endpoint(endpoint_uuid_b)
+
+ if _is_router_device(device_a) and _is_router_device(device_b):
+ endpoint_a.set_force_trunk()
+ endpoint_b.set_force_trunk()
diff --git a/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py b/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py
index 7abe201e0b9e4defe3192c971c0853909d3e82a6..2d95a5ede062f00399c0c2927345653840c68c26 100644
--- a/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py
+++ b/src/service/tests/test_l3nm_gnmi_static_rule_gen/MockServiceHandler.py
@@ -26,6 +26,7 @@ from .MockTaskExecutor import MockTaskExecutor
from service.service.tools.EndpointIdFormatters import endpointids_to_raw
from service.service.service_handlers.l3nm_gnmi_openconfig.ConfigRuleComposer import ConfigRuleComposer
from service.service.service_handlers.l3nm_gnmi_openconfig.StaticRouteGenerator import StaticRouteGenerator
+from service.service.service_handlers.l3nm_gnmi_openconfig.VlanIdPropagator import VlanIdPropagator
LOGGER = logging.getLogger(__name__)
@@ -37,6 +38,7 @@ class MockServiceHandler(_ServiceHandler):
self.__task_executor = task_executor
self.__settings_handler = SettingsHandler(service.service_config, **settings)
self.__config_rule_composer = ConfigRuleComposer()
+ self.__vlan_id_propagator = VlanIdPropagator(self.__config_rule_composer)
self.__static_route_generator = StaticRouteGenerator(self.__config_rule_composer)
self.__endpoint_map : Dict[Tuple[str, str], Tuple[str, str]] = dict()
@@ -94,8 +96,10 @@ class MockServiceHandler(_ServiceHandler):
#prev_endpoint = _endpoint
#prev_endpoint_obj = endpoint_obj
+ self.__vlan_id_propagator.compose(endpoints)
+ LOGGER.debug('[post-vlan] config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
self.__static_route_generator.compose(endpoints)
- LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
+ LOGGER.debug('[post] config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump())))
def _do_configurations(
self, config_rules_per_device : Dict[str, List[Dict]], endpoints : List[Tuple[str, str, Optional[str]]],
diff --git a/src/service/tests/test_l3nm_gnmi_static_rule_gen/test_tagged_access_subinterface.py b/src/service/tests/test_l3nm_gnmi_static_rule_gen/test_tagged_access_subinterface.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc096aae1048a1bcdd34f4d3375a2d47deed7319
--- /dev/null
+++ b/src/service/tests/test_l3nm_gnmi_static_rule_gen/test_tagged_access_subinterface.py
@@ -0,0 +1,55 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import importlib.util
+from pathlib import Path
+from types import SimpleNamespace
+
+
+MODULE_PATH = (
+ Path(__file__).resolve().parents[3]
+ / 'service'
+ / 'service'
+ / 'service_handlers'
+ / 'l3nm_gnmi_openconfig'
+ / 'ConfigRuleComposer.py'
+)
+MODULE_SPEC = importlib.util.spec_from_file_location('ConfigRuleComposer', MODULE_PATH)
+assert MODULE_SPEC is not None
+assert MODULE_SPEC.loader is not None
+CONFIG_RULE_COMPOSER = importlib.util.module_from_spec(MODULE_SPEC)
+MODULE_SPEC.loader.exec_module(CONFIG_RULE_COMPOSER)
+EndpointComposer = CONFIG_RULE_COMPOSER.EndpointComposer
+
+
+def test_tagged_access_preserves_subinterface_index() -> None:
+ endpoint = EndpointComposer('endpoint-uuid')
+ endpoint.objekt = SimpleNamespace(name='Ethernet11')
+ endpoint.ipv4_address = '172.17.1.1'
+ endpoint.ipv4_prefix_len = 24
+ endpoint.sub_interface_index = 0
+ endpoint.explicit_vlan_ids = {125}
+
+ config_rules = endpoint.get_config_rules(
+ 'default', service_vlan_id=125, access_vlan_tagged=False, delete=False
+ )
+
+ assert len(config_rules) == 1
+ config_rule = config_rules[0]
+ assert config_rule['custom']['resource_key'] == '/interface[Ethernet11]/subinterface[0]'
+
+ resource_value = json.loads(config_rule['custom']['resource_value'])
+ assert resource_value['index'] == 0
+ assert resource_value['vlan_id'] == 125
diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml
index 01fb05eeaf0b5b4b0bd9945da223141df5e6ba4e..0a5c7bf5a235ab3e3558883c24fc6f05a83fcd3e 100644
--- a/src/tests/.gitlab-ci.yml
+++ b/src/tests/.gitlab-ci.yml
@@ -28,6 +28,7 @@ include:
- local: '/src/tests/qkd_end2end/.gitlab-ci.yml'
- local: '/src/tests/acl_end2end/.gitlab-ci.yml'
- local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml'
+ - local: '/src/tests/osm_end2end/.gitlab-ci.yml'
- local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml'
- local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml'
diff --git a/src/tests/ecoc22/tests/Objects.py b/src/tests/ecoc22/tests/Objects.py
index 8d4874ea76bc276a9a9215d9b4f5fae3445ea123..77a4a3f605640f478bae63d34e1d1269f7c4c664 100644
--- a/src/tests/ecoc22/tests/Objects.py
+++ b/src/tests/ecoc22/tests/Objects.py
@@ -33,10 +33,22 @@ EP_ID_DC2_BKP = json_endpoint_id(DEV_ID_DC2, 'eth2')
DEV_ID_CS2GW1 = json_device_id('CS2-GW1')
DEV_ID_CS2GW2 = json_device_id('CS2-GW2')
-WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = wim_mapping(SITE_ID_DC1, EP_ID_DC1_PRI, DEV_ID_CS1GW1, priority=10, redundant=['DC1:DC1-GW:eth2'])
-WIM_SEP_DC1_BKP, WIM_MAP_DC1_BKP = wim_mapping(SITE_ID_DC1, EP_ID_DC1_BKP, DEV_ID_CS1GW2, priority=20, redundant=['DC1:DC1-GW:eth1'])
-WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = wim_mapping(SITE_ID_DC2, EP_ID_DC2_PRI, DEV_ID_CS2GW1, priority=10, redundant=['DC2:DC2-GW:eth2'])
-WIM_SEP_DC2_BKP, WIM_MAP_DC2_BKP = wim_mapping(SITE_ID_DC2, EP_ID_DC2_BKP, DEV_ID_CS2GW2, priority=20, redundant=['DC2:DC2-GW:eth1'])
+WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = wim_mapping(
+ SITE_ID_DC1, EP_ID_DC1_PRI, pe_device_id=DEV_ID_CS1GW1,
+ bearer_prefix='ECOC22', priority=10, redundant=['DC1:DC1-GW:eth2']
+)
+WIM_SEP_DC1_BKP, WIM_MAP_DC1_BKP = wim_mapping(
+ SITE_ID_DC1, EP_ID_DC1_BKP, pe_device_id=DEV_ID_CS1GW2,
+ bearer_prefix='ECOC22', priority=20, redundant=['DC1:DC1-GW:eth1']
+)
+WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = wim_mapping(
+ SITE_ID_DC2, EP_ID_DC2_PRI, pe_device_id=DEV_ID_CS2GW1,
+ bearer_prefix='ECOC22', priority=10, redundant=['DC2:DC2-GW:eth2']
+)
+WIM_SEP_DC2_BKP, WIM_MAP_DC2_BKP = wim_mapping(
+ SITE_ID_DC2, EP_ID_DC2_BKP, pe_device_id=DEV_ID_CS2GW2,
+ bearer_prefix='ECOC22', priority=20, redundant=['DC2:DC2-GW:eth1']
+)
WIM_MAPPING = [
WIM_MAP_DC1_PRI, WIM_MAP_DC1_BKP,
diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json
index 95bea42eacba9440fc5e68a035d3a9ff5a3e59a5..0179de490680bb45e8b634a15091e9658661e4a2 100644
--- a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json
+++ b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json
@@ -35,6 +35,7 @@
"service": {"svc-mtu": 1400},
"connection": {
"encapsulation-type": "vlan",
+ "eth-inf-type": "ietf-l2vpn-svc:tagged",
"tagged-interface": {
"type": "ietf-l2vpn-svc:dot1q",
"dot1q-vlan-tagged": {"cvlan-id": 125}
@@ -65,6 +66,7 @@
"service": {"svc-mtu": 1400},
"connection": {
"encapsulation-type": "vlan",
+ "eth-inf-type": "ietf-l2vpn-svc:tagged",
"tagged-interface": {
"type": "ietf-l2vpn-svc:dot1q",
"dot1q-vlan-tagged": {"cvlan-id": 125}
diff --git a/src/tests/oeccpsc22/tests/Objects_Service.py b/src/tests/oeccpsc22/tests/Objects_Service.py
index 3440c5515a85099d459c1ebe4f06b63dce2333e6..ad76422c6c437c9153744c52d24cf3c128fa6594 100644
--- a/src/tests/oeccpsc22/tests/Objects_Service.py
+++ b/src/tests/oeccpsc22/tests/Objects_Service.py
@@ -21,14 +21,14 @@ WIM_SEP_D1R1_ID = compose_service_endpoint_id(D1_ENDPOINT_IDS[D1_DEVICE
WIM_SEP_D1R1_ROUTER_ID = '10.10.10.1'
WIM_SEP_D1R1_ROUTER_DIST = '65000:111'
WIM_SEP_D1R1_SITE_ID = '1'
-WIM_SEP_D1R1_BEARER = compose_bearer(D1_ENDPOINT_IDS[D1_DEVICE_D1R1_UUID]['3/1'])
+WIM_SEP_D1R1_BEARER = compose_bearer(D1_ENDPOINT_IDS[D1_DEVICE_D1R1_UUID]['3/1'], 'OECCPSC22D1')
WIM_SRV_D1R1_VLAN_ID = 400
WIM_SEP_D2R4_ID = compose_service_endpoint_id(D2_ENDPOINT_IDS[D2_DEVICE_D2R4_UUID]['3/3'])
WIM_SEP_D2R4_ROUTER_ID = '20.20.20.1'
WIM_SEP_D2R4_ROUTER_DIST = '65000:222'
WIM_SEP_D2R4_SITE_ID = '2'
-WIM_SEP_D2R4_BEARER = compose_bearer(D2_ENDPOINT_IDS[D2_DEVICE_D2R4_UUID]['3/3'])
+WIM_SEP_D2R4_BEARER = compose_bearer(D2_ENDPOINT_IDS[D2_DEVICE_D2R4_UUID]['3/3'], 'OECCPSC22D2')
WIM_SRV_D2R4_VLAN_ID = 500
WIM_USERNAME = 'admin'
diff --git a/src/tests/oeccpsc22/tests/Tools.py b/src/tests/oeccpsc22/tests/Tools.py
index 8e97fa4030ad2105ff841b0d7604c7c59fbe7945..79ec44f9f6e13cc970d6df4b20cf63fda5811257 100644
--- a/src/tests/oeccpsc22/tests/Tools.py
+++ b/src/tests/oeccpsc22/tests/Tools.py
@@ -33,7 +33,7 @@ def compose_service_endpoint_id(endpoint_id):
endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
return ':'.join([device_uuid, endpoint_uuid])
-def compose_bearer(endpoint_id):
+def compose_bearer(endpoint_id : Dict, bearer_prefix : str):
device_uuid = endpoint_id['device_id']['device_uuid']['uuid']
endpoint_uuid = endpoint_id['endpoint_uuid']['uuid']
- return ':'.join([device_uuid, endpoint_uuid])
+ return ':'.join([bearer_prefix, device_uuid, endpoint_uuid])
diff --git a/src/tests/ofc22/.gitlab-ci.yml b/src/tests/ofc22/.gitlab-ci.yml
index 6c3b65f4ac7d8b3432024c3b790bb11540e999e6..48de160a2fdce80144a18a7eb8f9d980cc2f80e0 100644
--- a/src/tests/ofc22/.gitlab-ci.yml
+++ b/src/tests/ofc22/.gitlab-ci.yml
@@ -141,7 +141,7 @@ end2end_test ofc22:
- ./deploy/nats.sh
- ./deploy/kafka.sh
- ./deploy/qdb.sh
- - ./deploy/expose_dashboard.sh
+ #- ./deploy/expose_dashboard.sh
- ./deploy/tfs.sh
- ./deploy/show.sh
@@ -160,14 +160,16 @@ end2end_test ofc22:
after_script:
# Dump TeraFlowSDN component logs
- source src/tests/${TEST_NAME}/deploy_specs.sh
- - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server
- - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/deviceservice -c server
- - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/pathcompservice -c frontend
- - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/serviceservice -c server
- - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/sliceservice -c server
- - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/nbiservice -c server
- - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice -c server
- - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/ztpservice -c ztpservice
+ - rm -rf src/tests/${TEST_NAME}/component_logs
+ - mkdir -p src/tests/${TEST_NAME}/component_logs
+ - kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/contextservice -c server > src/tests/${TEST_NAME}/component_logs/contextservice.log 2>&1 || true
+ - kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/deviceservice -c server > src/tests/${TEST_NAME}/component_logs/deviceservice.log 2>&1 || true
+ - kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/pathcompservice -c frontend > src/tests/${TEST_NAME}/component_logs/pathcompservice-frontend.log 2>&1 || true
+ - kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/serviceservice -c server > src/tests/${TEST_NAME}/component_logs/serviceservice.log 2>&1 || true
+ - kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/sliceservice -c server > src/tests/${TEST_NAME}/component_logs/sliceservice.log 2>&1 || true
+ - kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/nbiservice -c server > src/tests/${TEST_NAME}/component_logs/nbiservice.log 2>&1 || true
+ - kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/monitoringservice -c server > src/tests/${TEST_NAME}/component_logs/monitoringservice.log 2>&1 || true
+ - kubectl logs --namespace $TFS_K8S_NAMESPACE deployment/ztpservice -c ztpservice > src/tests/${TEST_NAME}/component_logs/ztpservice.log 2>&1 || true
# Clean up
- kubectl delete namespaces tfs || true
@@ -186,3 +188,5 @@ end2end_test ofc22:
when: always
reports:
junit: ./src/tests/${TEST_NAME}/report_*.xml
+ paths:
+ - ./src/tests/${TEST_NAME}/component_logs/*.log
diff --git a/src/tests/ofc22/tests/Objects.py b/src/tests/ofc22/tests/Objects.py
index a2a68a1749ceb1bb3641864479b770bc69734b39..53eb67b2abf4e7c37b16bc4a790d406bf00d27be 100644
--- a/src/tests/ofc22/tests/Objects.py
+++ b/src/tests/ofc22/tests/Objects.py
@@ -26,8 +26,8 @@ SITE_ID_DC2 = '2'
DEV_ID_DC2 = json_device_id('R3-EMU')
EP_ID_DC2 = json_endpoint_id(DEV_ID_DC2, '13/1/2')
-WIM_SEP_DC1, WIM_MAP_DC1 = wim_mapping(SITE_ID_DC1, EP_ID_DC1)
-WIM_SEP_DC2, WIM_MAP_DC2 = wim_mapping(SITE_ID_DC2, EP_ID_DC2)
+WIM_SEP_DC1, WIM_MAP_DC1 = wim_mapping(SITE_ID_DC1, EP_ID_DC1, bearer_prefix='OFC22')
+WIM_SEP_DC2, WIM_MAP_DC2 = wim_mapping(SITE_ID_DC2, EP_ID_DC2, bearer_prefix='OFC22')
WIM_MAPPING = [
WIM_MAP_DC1,
diff --git a/src/tests/ofc25/.gitlab-ci.yml b/src/tests/ofc25/.gitlab-ci.yml
index c7b5ef9afb5340503fef14baf6119d34f13e1057..f7905ce0c703c7de869f6144dfe69464b0dd9571 100644
--- a/src/tests/ofc25/.gitlab-ci.yml
+++ b/src/tests/ofc25/.gitlab-ci.yml
@@ -166,10 +166,10 @@ end2end_test ofc25:
# Configure TeraFlowSDN deployment
# Uncomment if DEBUG log level is needed for the components
- - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml
- - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml
- - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml
- - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml
+ #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml
+ #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml
+ #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml
+ #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml
#- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/sliceservice.yaml
#- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml
#- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/opticalcontrollerservice.yaml
diff --git a/src/tests/osm_end2end/.gitignore b/src/tests/osm_end2end/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..7e04d0164cb25c23d8cc2bd5135b70bd62510fd2
--- /dev/null
+++ b/src/tests/osm_end2end/.gitignore
@@ -0,0 +1,21 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+clab-*/
+images/
+*.clab.yml.bak
+*.tar
+*.tar.gz
+local_results/
+gnmic_lab/results/
diff --git a/src/tests/osm_end2end/.gitlab-ci.yml b/src/tests/osm_end2end/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0e439b82bbe38c0fa5062127422d0f78850e488a
--- /dev/null
+++ b/src/tests/osm_end2end/.gitlab-ci.yml
@@ -0,0 +1,308 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build osm_end2end:
+ variables:
+ TEST_NAME: 'osm_end2end'
+ stage: build
+ before_script:
+ - docker image prune --force
+ - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+ script:
+ - docker buildx build -t "${TEST_NAME}:latest" -f ./src/tests/${TEST_NAME}/Dockerfile .
+ - docker tag "${TEST_NAME}:latest" "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
+ - docker push "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
+ after_script:
+ - docker image prune --force
+ rules:
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+ - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+ - changes:
+ - src/common/**/*.py
+ - proto/*.proto
+ - src/tests/${TEST_NAME}/**/*.{py,in,sh,yml,json,cfg}
+ - src/tests/${TEST_NAME}/Dockerfile
+ - .gitlab-ci.yml
+
+# Deploy TeraFlowSDN and Execute end-2-end test
+end2end_test osm_end2end:
+ timeout: 45m
+ variables:
+ TEST_NAME: 'osm_end2end'
+ stage: end2end_test
+ # Disabled to force running it after all other tasks
+ #needs:
+ # - build osm_end2end
+ before_script:
+ # Cleanup old ContainerLab scenarios
+ - containerlab destroy --all --cleanup || true
+
+ # Do Docker cleanup
+ - docker ps --all --quiet | xargs --no-run-if-empty docker stop
+ - docker container prune --force
+ - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force
+ - docker image prune --force
+ - docker network prune --force
+ - docker volume prune --all --force
+ - docker buildx prune --force
+
+ # Check MicroK8s is ready
+ - microk8s status --wait-ready
+ - LOOP_MAX_ATTEMPTS=10
+ - LOOP_COUNTER=0
+ - >
+ while ! kubectl get pods --all-namespaces &> /dev/null; do
+ printf "%c" "."
+ sleep 1
+ LOOP_COUNTER=$((LOOP_COUNTER + 1))
+ if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then
+ echo "Max attempts reached, exiting the loop."
+ exit 1
+ fi
+ done
+ - kubectl get pods --all-namespaces
+
+ # Always delete Kubernetes namespaces
+ - export K8S_NAMESPACES=$(kubectl get namespace -o jsonpath='{.items[*].metadata.name}')
+ - echo "K8S_NAMESPACES=${K8S_NAMESPACES}"
+
+ - export OLD_NATS_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^nats')
+ - echo "OLD_NATS_NAMESPACES=${OLD_NATS_NAMESPACES}"
+ - >
+ for ns in ${OLD_NATS_NAMESPACES}; do
+ if [[ "$ns" == nats* ]]; then
+ if helm3 status "$ns" &>/dev/null; then
+ helm3 uninstall "$ns" -n "$ns"
+ else
+ echo "Release '$ns' not found, skipping..."
+ fi
+ fi
+ done
+ - export OLD_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^(tfs|crdb|qdb|kafka|nats)')
+ - echo "OLD_NAMESPACES=${OLD_NAMESPACES}"
+ - kubectl delete namespace ${OLD_NAMESPACES} || true
+
+ # Clean-up Kubernetes Failed pods
+ - >
+ kubectl get pods --all-namespaces --no-headers --field-selector=status.phase=Failed
+ -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name |
+ xargs --no-run-if-empty --max-args=2 kubectl delete pod --namespace
+
+ # Login Docker repository
+ - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+
+ script:
+ # Download Docker image to run the test
+ - docker pull "${CI_REGISTRY_IMAGE}/${TEST_NAME}:latest"
+
+ # Check MicroK8s is ready
+ - microk8s status --wait-ready
+ - LOOP_MAX_ATTEMPTS=10
+ - LOOP_COUNTER=0
+ - >
+ while ! kubectl get pods --all-namespaces &> /dev/null; do
+ printf "%c" "."
+ sleep 1
+ LOOP_COUNTER=$((LOOP_COUNTER + 1))
+ if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then
+ echo "Max attempts reached, exiting the loop."
+ exit 1
+ fi
+ done
+ - kubectl get pods --all-namespaces
+
+ # Deploy ContainerLab Scenario
+ - RUNNER_PATH=`pwd`
+ #- cd $PWD/src/tests/${TEST_NAME}
+ - mkdir -p /tmp/clab/${TEST_NAME}
+ - cp -R src/tests/${TEST_NAME}/clab/* /tmp/clab/${TEST_NAME}
+ - tree -la /tmp/clab/${TEST_NAME}
+ - cd /tmp/clab/${TEST_NAME}
+ - containerlab deploy --reconfigure --topo ${TEST_NAME}.clab.yml
+ - cd $RUNNER_PATH
+
+ # Wait for initialization of Device NOSes
+ - sleep 3
+ - docker ps -a
+
+ # Dump configuration of the routers (before any configuration)
+ - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
+ - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
+ - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
+
+ # Configure TeraFlowSDN deployment
+ # Uncomment if DEBUG log level is needed for the components
+ #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml
+ #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml
+ #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml
+ #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml
+ #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml
+
+ - source src/tests/${TEST_NAME}/deploy_specs.sh
+ #- export TFS_REGISTRY_IMAGES="${CI_REGISTRY_IMAGE}"
+ #- export TFS_SKIP_BUILD="YES"
+ #- export TFS_IMAGE_TAG="latest"
+ #- echo "TFS_REGISTRY_IMAGES=${CI_REGISTRY_IMAGE}"
+
+ # Deploy TeraFlowSDN
+ - ./deploy/crdb.sh
+ - ./deploy/nats.sh
+ - ./deploy/kafka.sh
+ #- ./deploy/qdb.sh
+ - ./deploy/tfs.sh
+ - ./deploy/show.sh
+
+ ## Wait for Context to be subscribed to NATS
+ ## WARNING: this loop is infinite if there is no subscriber (such as monitoring).
+ ## Investigate if we can use a counter to limit the number of iterations.
+ ## For now, keep it commented out.
+ #- LOOP_MAX_ATTEMPTS=180
+ #- LOOP_COUNTER=0
+ #- >
+ # while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do
+ # echo "Attempt: $LOOP_COUNTER"
+ # kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1;
+ # sleep 1;
+ # LOOP_COUNTER=$((LOOP_COUNTER + 1))
+ # if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then
+ # echo "Max attempts reached, exiting the loop."
+ # break
+ # fi
+ # done
+ - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server
+
+ - |
+ dump_router_configs() {
+ local LABEL=$1
+ echo "==== ${LABEL} ===="
+ containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
+ containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
+ containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
+ }
+
+ ping_check() {
+ local SRC=$1 DST_IP=$2 PATTERN=$3
+ local OUTPUT
+ OUTPUT=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=${SRC} --cmd "ping -n -c3 ${DST_IP}" --format json)
+ echo "$OUTPUT"
+ if echo "$OUTPUT" | grep -E "$PATTERN" >/dev/null; then
+ echo "PASSED ${SRC}->${DST_IP}"
+ return 0
+ fi
+ echo "FAILED ${SRC}->${DST_IP}"
+ return 1
+ }
+
+ assert_no_connectivity() {
+ local SRC=$1 LOCAL_IP=$2 LOCAL_GW=$3 REMOTE_GW=$4 REMOTE_IP=$5
+ ping_check "${SRC}" "${LOCAL_IP}" "3 packets transmitted, 3 received, 0% packet loss"
+ ping_check "${SRC}" "${LOCAL_GW}" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss"
+ ping_check "${SRC}" "${REMOTE_GW}" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss"
+ ping_check "${SRC}" "${REMOTE_IP}" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss"
+ }
+
+ assert_connectivity() {
+ local SRC=$1 LOCAL_IP=$2 LOCAL_GW=$3 REMOTE_GW=$4 REMOTE_IP=$5
+ ping_check "${SRC}" "${LOCAL_IP}" "3 packets transmitted, 3 received, 0% packet loss"
+ ping_check "${SRC}" "${LOCAL_GW}" "3 packets transmitted, 3 received, 0% packet loss"
+ ping_check "${SRC}" "${REMOTE_GW}" "3 packets transmitted, 3 received, 0% packet loss"
+ ping_check "${SRC}" "${REMOTE_IP}" "3 packets transmitted, 3 received, 0% packet loss"
+ }
+
+ run_osm_test() {
+ local ACTION=$1 VARIANT=$2
+ docker run -t --rm --name "${TEST_NAME}-${VARIANT}-${ACTION}" --network=host \
+ --env OSM_SERVICE_VARIANT="${VARIANT}" \
+ --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" \
+ --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" \
+ "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest" "/var/teraflow/run-osm-service-${ACTION}.sh"
+ }
+
+ run_variant_cycle() {
+ local VARIANT=$1 SRC=$2 LOCAL_IP=$3 LOCAL_GW=$4 REMOTE_GW=$5 REMOTE_IP=$6
+
+ echo "==== Starting ${VARIANT} OSM service cycle ===="
+ assert_no_connectivity "${SRC}" "${LOCAL_IP}" "${LOCAL_GW}" "${REMOTE_GW}" "${REMOTE_IP}"
+
+ run_osm_test create "${VARIANT}"
+ sleep 60
+ dump_router_configs "after configuring ${VARIANT} OSM service"
+ assert_connectivity "${SRC}" "${LOCAL_IP}" "${LOCAL_GW}" "${REMOTE_GW}" "${REMOTE_IP}"
+
+ run_osm_test remove "${VARIANT}"
+ sleep 60
+ dump_router_configs "after removing ${VARIANT} OSM service"
+ assert_no_connectivity "${SRC}" "${LOCAL_IP}" "${LOCAL_GW}" "${REMOTE_GW}" "${REMOTE_IP}"
+ }
+
+ # Run end-to-end test: onboard scenario
+ docker run -t --rm --name ${TEST_NAME}-onboarding --network=host \
+ --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" \
+ --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" \
+ $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-onboarding.sh
+
+ dump_router_configs "after onboarding scenario"
+
+ # Run end-to-end test: first untagged connectivity, then tagged connectivity
+ run_variant_cycle "untagged" "dc1_untagged" "172.16.1.10" "172.16.1.1" "172.16.3.1" "172.16.3.10"
+ run_variant_cycle "tagged" "dc3_tagged" "172.17.1.10" "172.17.1.1" "172.17.3.1" "172.17.3.10"
+
+ # Run end-to-end test: cleanup scenario
+ docker run -t --rm --name ${TEST_NAME}-cleanup --network=host \
+ --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" \
+ --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" \
+ $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-cleanup.sh
+
+ after_script:
+ # Dump configuration of the routers (on after_script)
+ - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
+ - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
+ - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
+
+ # Dump TeraFlowSDN component logs
+ - source src/tests/${TEST_NAME}/deploy_specs.sh
+ - rm -rf src/tests/${TEST_NAME}/component_logs
+ - mkdir -p src/tests/${TEST_NAME}/component_logs
+ - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server > src/tests/${TEST_NAME}/component_logs/contextservice.log 2>&1 || true
+ - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/deviceservice -c server > src/tests/${TEST_NAME}/component_logs/deviceservice.log 2>&1 || true
+ - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/pathcompservice -c frontend > src/tests/${TEST_NAME}/component_logs/pathcompservice-frontend.log 2>&1 || true
+ - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/serviceservice -c server > src/tests/${TEST_NAME}/component_logs/serviceservice.log 2>&1 || true
+ - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/nbiservice -c server > src/tests/${TEST_NAME}/component_logs/nbiservice.log 2>&1 || true
+
+ # Clean up
+ - RUNNER_PATH=`pwd`
+ #- cd $PWD/src/tests/${TEST_NAME}
+ - cd /tmp/clab/${TEST_NAME}
+ - containerlab destroy --topo ${TEST_NAME}.clab.yml --cleanup || true
+ - sudo rm -rf clab-${TEST_NAME}/ .${TEST_NAME}.clab.yml.bak || true
+ - cd $RUNNER_PATH
+ - kubectl delete namespaces tfs || true
+ - docker ps --all --quiet | xargs --no-run-if-empty docker stop
+ - docker container prune --force
+ - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force
+ - docker network prune --force
+ - docker volume prune --all --force
+ - docker image prune --force
+
+ #coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+ rules:
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+ - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+ artifacts:
+ when: always
+ reports:
+ junit: ./src/tests/${TEST_NAME}/report_*.xml
+ paths:
+ - ./src/tests/${TEST_NAME}/component_logs/*.log
diff --git a/src/tests/osm_end2end/Dockerfile b/src/tests/osm_end2end/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..5b59ca0febf48edeeb95ab1c4fdcb05584dae9e8
--- /dev/null
+++ b/src/tests/osm_end2end/Dockerfile
@@ -0,0 +1,86 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+ apt-get --yes --quiet --quiet install wget g++ git && \
+ rm -rf /var/lib/apt/lists/*
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade 'pip==25.2'
+RUN python3 -m pip install --upgrade 'setuptools==79.0.0' 'wheel==0.45.1'
+RUN python3 -m pip install --upgrade 'pip-tools==7.3.0'
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/^(import\ .*)_pb2/from . \1_pb2/g' {} \;
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/tests/osm_end2end
+WORKDIR /var/teraflow/tests/osm_end2end
+COPY src/tests/osm_end2end/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/__init__.py ./__init__.py
+COPY src/common/*.py ./common/
+COPY src/common/tests/. ./common/tests/
+COPY src/common/tools/. ./common/tools/
+COPY src/context/__init__.py context/__init__.py
+COPY src/context/client/. context/client/
+COPY src/device/__init__.py device/__init__.py
+COPY src/device/client/. device/client/
+COPY src/monitoring/__init__.py monitoring/__init__.py
+COPY src/monitoring/client/. monitoring/client/
+COPY src/service/__init__.py service/__init__.py
+COPY src/service/client/. service/client/
+COPY src/slice/__init__.py slice/__init__.py
+COPY src/slice/client/. slice/client/
+COPY src/vnt_manager/__init__.py vnt_manager/__init__.py
+COPY src/vnt_manager/client/. vnt_manager/client/
+COPY src/tests/*.py ./tests/
+COPY src/tests/osm_end2end/__init__.py ./tests/osm_end2end/__init__.py
+COPY src/tests/osm_end2end/data/. ./tests/osm_end2end/data/
+COPY src/tests/osm_end2end/tests/. ./tests/osm_end2end/tests/
+COPY src/tests/osm_end2end/scripts/. ./
+
+RUN apt-get --yes --quiet --quiet update && \
+ apt-get --yes --quiet --quiet install tree && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN tree -la /var/teraflow
diff --git a/src/tests/osm_end2end/README.md b/src/tests/osm_end2end/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8a165fa14120d672e6212ad9aaaceb85b886a8a1
--- /dev/null
+++ b/src/tests/osm_end2end/README.md
@@ -0,0 +1,179 @@
+# OSM Service End-to-End integration test
+
+## Run locally
+```bash
+cd ~/tfs-ctrl
+src/tests/osm_end2end/run-local.sh
+```
+
+Useful variants:
+```bash
+# Only prepare the environment and deploy the topology
+src/tests/osm_end2end/run-local.sh prepare build-image deploy-clab deploy-tfs onboarding
+
+# Reuse an existing TFS deployment and local test image
+OSM_E2E_BUILD_IMAGE=no OSM_E2E_DEPLOY_TFS=no src/tests/osm_end2end/run-local.sh untagged tagged
+
+# Dump TFS component logs after a failed local run
+src/tests/osm_end2end/run-local.sh logs
+```
+
+Useful environment variables:
+```bash
+OSM_E2E_IMAGE=osm_end2end:local
+OSM_E2E_CLEAN_START=yes
+OSM_E2E_BUILD_IMAGE=yes
+OSM_E2E_DEPLOY_TFS=yes
+OSM_E2E_CONTAINERLAB_USE_SUDO=yes
+KUBECTL_CMD=kubectl
+HELM_CMD=helm3
+MICROK8S_CMD=microk8s
+
+# If kubectl is only available through MicroK8s
+KUBECTL_CMD="microk8s kubectl"
+```
+
+Local results are written to `src/tests/osm_end2end/local_results/`.
+
+## Emulated DataPlane Deployment
+- ContainerLab
+- Scenario
+- Descriptor
+
+## TeraFlowSDN Deployment
+```bash
+cd ~/tfs-ctrl
+source ~/tfs-ctrl/src/tests/osm_end2end/deploy_specs.sh
+./deploy/all.sh
+```
+
+# ContainerLab - Arista cEOS - Commands
+
+## Download and install ContainerLab
+```bash
+sudo bash -c "$(curl -sL https://get.containerlab.dev)" -- -v 0.59.0
+```
+
+## Download Arista cEOS image and create Docker image
+```bash
+cd ~/tfs-ctrl/src/tests/osm_end2end/
+docker import arista/cEOS64-lab-4.32.2F.tar ceos:4.32.2F
+```
+
+## Deploy scenario
+```bash
+cd ~/tfs-ctrl/src/tests/osm_end2end/
+sudo containerlab deploy --topo osm_end2end.clab.yml
+```
+
+## Inspect scenario
+```bash
+cd ~/tfs-ctrl/src/tests/osm_end2end/
+sudo containerlab inspect --topo osm_end2end.clab.yml
+```
+
+## Destroy scenario
+```bash
+cd ~/tfs-ctrl/src/tests/osm_end2end/
+sudo containerlab destroy --topo osm_end2end.clab.yml
+sudo rm -rf clab-osm_end2end/ .osm_end2end.clab.yml.bak
+```
+
+## Access cEOS Bash/CLI
+```bash
+docker exec -it clab-osm_end2end-r1 bash
+docker exec -it clab-osm_end2end-r2 bash
+docker exec -it clab-osm_end2end-r3 bash
+docker exec -it clab-osm_end2end-r1 Cli
+docker exec -it clab-osm_end2end-r2 Cli
+docker exec -it clab-osm_end2end-r3 Cli
+```
+
+## Configure ContainerLab clients
+```bash
+docker exec -it clab-osm_end2end-dc1_untagged bash
+ ip link set address 00:c1:ab:00:01:0a dev eth1
+ ip link set eth1 up
+ ip address add 172.16.1.10/24 dev eth1
+ ip route add 172.16.3.0/24 via 172.16.1.1
+ ping 172.16.3.10
+
+docker exec -it clab-osm_end2end-dc2_untagged bash
+ ip link set address 00:c1:ab:00:02:0a dev eth1
+ ip link set eth1 up
+ ip address add 172.16.3.10/24 dev eth1
+ ip route add 172.16.1.0/24 via 172.16.3.1
+ ping 172.16.1.10
+
+docker exec -it clab-osm_end2end-dc3_tagged bash
+ ip link set address 00:c1:ab:00:03:0a dev eth1
+ ip link set eth1 up
+ ip link add link eth1 name eth1.125 type vlan id 125
+ ip address add 172.17.1.10/24 dev eth1.125
+ ip link set eth1.125 up
+ ip route add 172.17.3.0/24 via 172.17.1.1
+ ping 172.17.3.10
+
+docker exec -it clab-osm_end2end-dc4_tagged bash
+ ip link set address 00:c1:ab:00:04:0a dev eth1
+ ip link set eth1 up
+ ip link add link eth1 name eth1.125 type vlan id 125
+ ip address add 172.17.3.10/24 dev eth1.125
+ ip link set eth1.125 up
+ ip route add 172.17.1.0/24 via 172.17.3.1
+ ping 172.17.1.10
+```
+
+## Install gNMIc
+```bash
+sudo bash -c "$(curl -sL https://get-gnmic.kmrd.dev)"
+```
+
+## gNMI Capabilities request
+```bash
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure capabilities
+```
+
+## gNMI Get request
+```bash
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path / > r1.json
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path /interfaces/interface > r1-ifaces.json
+```
+
+## gNMI Set request
+```bash
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --update-path /system/config/hostname --update-value srl11
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path /system/config/hostname
+
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set \
+--update-path '/network-instances/network-instance[name=default]/vlans/vlan[vlan-id=200]/config/vlan-id' --update-value 200 \
+--update-path '/interfaces/interface[name=Ethernet10]/config/name' --update-value '"Ethernet10"' \
+--update-path '/interfaces/interface[name=Ethernet10]/ethernet/switched-vlan/config/interface-mode' --update-value '"ACCESS"' \
+--update-path '/interfaces/interface[name=Ethernet10]/ethernet/switched-vlan/config/access-vlan' --update-value 200 \
+--update-path '/interfaces/interface[name=Ethernet2]/config/name' --update-value '"Ethernet2"' \
+--update-path '/interfaces/interface[name=Ethernet2]/ethernet/switched-vlan/config/interface-mode' --update-value '"TRUNK"' \
+--update-path '/interfaces/interface[name=Ethernet2]/ethernet/switched-vlan/config/trunk-vlans' --update-value 200
+
+```
+
+## Subscribe request
+```bash
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf subscribe --path /interfaces/interface[name=Management0]/state/
+
+# In another terminal, you can generate traffic opening SSH connection
+ssh admin@clab-osm_end2end-r1
+```
+
+## Check configurations done:
+```bash
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/' > r1-all.json
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/network-instances' > r1-nis.json
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/interfaces' > r1-ifs.json
+```
+
+## Delete elements:
+```bash
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/network-instances/network-instance[name=b19229e8]'
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]'
+gnmic --address clab-osm_end2end-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]'
+```
diff --git a/src/tests/osm_end2end/__init__.py b/src/tests/osm_end2end/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ccc21c7db78aac26daa1f8c5ff8e1ffd3f35460
--- /dev/null
+++ b/src/tests/osm_end2end/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/osm_end2end/clab/osm_end2end.clab.yml b/src/tests/osm_end2end/clab/osm_end2end.clab.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e084a970d27075ec601aedefa502344fcb7af9e7
--- /dev/null
+++ b/src/tests/osm_end2end/clab/osm_end2end.clab.yml
@@ -0,0 +1,102 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TFS - Arista devices + Linux clients
+
+name: osm_end2end
+
+mgmt:
+ network: mgmt-net
+ ipv4-subnet: 172.20.20.0/24
+
+topology:
+ kinds:
+ arista_ceos:
+ kind: arista_ceos
+ #image: ceos:4.30.4M
+ #image: ceos:4.31.2F
+ #image: ceos:4.31.5M # tested, works
+ #image: ceos:4.32.0F
+ #image: ceos:4.33.5M
+ #image: ceos:4.34.4M
+ image: ceos:4.32.2F
+ #image: ceos:4.32.2.1F
+ #image: ceos:4.33.1F # does not work, libyang.util.LibyangError: failed to parse data tree: No module named "openconfig-platform-healthz" in the context.
+ linux:
+ kind: linux
+ image: ghcr.io/hellt/network-multitool:latest
+
+ nodes:
+ r1:
+ kind: arista_ceos
+ mgmt-ipv4: 172.20.20.101
+ startup-config: r1-startup.cfg
+
+ r2:
+ kind: arista_ceos
+ mgmt-ipv4: 172.20.20.102
+ startup-config: r2-startup.cfg
+
+ r3:
+ kind: arista_ceos
+ mgmt-ipv4: 172.20.20.103
+ startup-config: r3-startup.cfg
+
+ dc1_untagged:
+ kind: linux
+ mgmt-ipv4: 172.20.20.201
+ exec:
+ - ip link set address 00:c1:ab:00:01:0a dev eth1
+ - ip link set eth1 up
+ - ip address add 172.16.1.10/24 dev eth1
+ - ip route add 172.16.3.0/24 via 172.16.1.1
+
+ dc2_untagged:
+ kind: linux
+ mgmt-ipv4: 172.20.20.202
+ exec:
+ - ip link set address 00:c1:ab:00:02:0a dev eth1
+ - ip link set eth1 up
+ - ip address add 172.16.3.10/24 dev eth1
+ - ip route add 172.16.1.0/24 via 172.16.3.1
+
+ dc3_tagged:
+ kind: linux
+ mgmt-ipv4: 172.20.20.203
+ exec:
+ - ip link set address 00:c1:ab:00:03:0a dev eth1
+ - ip link set eth1 up
+ - ip link add link eth1 name eth1.125 type vlan id 125
+ - ip address add 172.17.1.10/24 dev eth1.125
+ - ip link set eth1.125 up
+ - ip route add 172.17.3.0/24 via 172.17.1.1
+
+ dc4_tagged:
+ kind: linux
+ mgmt-ipv4: 172.20.20.204
+ exec:
+ - ip link set address 00:c1:ab:00:04:0a dev eth1
+ - ip link set eth1 up
+ - ip link add link eth1 name eth1.125 type vlan id 125
+ - ip address add 172.17.3.10/24 dev eth1.125
+ - ip link set eth1.125 up
+ - ip route add 172.17.1.0/24 via 172.17.3.1
+
+ links:
+ - endpoints: ["r1:eth2", "r2:eth1"]
+ - endpoints: ["r2:eth3", "r3:eth2"]
+ - endpoints: ["r1:eth10", "dc1_untagged:eth1"]
+ - endpoints: ["r3:eth10", "dc2_untagged:eth1"]
+ - endpoints: ["r1:eth11", "dc3_tagged:eth1"]
+ - endpoints: ["r3:eth11", "dc4_tagged:eth1"]
diff --git a/src/tests/osm_end2end/clab/r1-startup.cfg b/src/tests/osm_end2end/clab/r1-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..ba65c2b55777babab0d95555a70dffd5e6877036
--- /dev/null
+++ b/src/tests/osm_end2end/clab/r1-startup.cfg
@@ -0,0 +1,50 @@
+! device: r1 (cEOSLab, EOS-4.32.2F-38195967.4322F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r1
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet2
+!
+interface Ethernet10
+!
+interface Ethernet11
+!
+interface Management0
+ ip address 172.20.20.101/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/osm_end2end/clab/r2-startup.cfg b/src/tests/osm_end2end/clab/r2-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..e1ab661a0ab455ab22f025ec6b2f96cf42a9f2dd
--- /dev/null
+++ b/src/tests/osm_end2end/clab/r2-startup.cfg
@@ -0,0 +1,48 @@
+! device: r2 (cEOSLab, EOS-4.32.2F-38195967.4322F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r2
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet1
+!
+interface Ethernet3
+!
+interface Management0
+ ip address 172.20.20.102/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/osm_end2end/clab/r3-startup.cfg b/src/tests/osm_end2end/clab/r3-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..13eb9f748d31b7e74a41c69c6cfe577ff3b9c758
--- /dev/null
+++ b/src/tests/osm_end2end/clab/r3-startup.cfg
@@ -0,0 +1,50 @@
+! device: r3 (cEOSLab, EOS-4.32.2F-38195967.4322F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r3
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet2
+!
+interface Ethernet10
+!
+interface Ethernet11
+!
+interface Management0
+ ip address 172.20.20.103/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/osm_end2end/clab/target_config_tagged/r1-startup.cfg b/src/tests/osm_end2end/clab/target_config_tagged/r1-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..2965d3dd13fa92398877b64600b5b0850aea74f0
--- /dev/null
+++ b/src/tests/osm_end2end/clab/target_config_tagged/r1-startup.cfg
@@ -0,0 +1,64 @@
+! device: r1 (cEOSLab, EOS-4.32.2F-38195967.4322F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r1
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+vlan 125
+ name tfs-vlan-125
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet2
+ no switchport
+!
+interface Ethernet2.125
+ encapsulation dot1q vlan 125
+ ip address 10.254.172.69/30
+!
+interface Ethernet10
+!
+interface Ethernet11
+ no switchport
+!
+interface Ethernet11.125
+ encapsulation dot1q vlan 125
+ ip address 172.17.1.1/24
+!
+interface Management0
+ ip address 172.20.20.101/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+ip route 172.17.3.0/24 10.254.172.70 metric 1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/osm_end2end/clab/target_config_tagged/r2-startup.cfg b/src/tests/osm_end2end/clab/target_config_tagged/r2-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..7a4968110f69b85a029d7b4f80c891c79cfee5bb
--- /dev/null
+++ b/src/tests/osm_end2end/clab/target_config_tagged/r2-startup.cfg
@@ -0,0 +1,63 @@
+! device: r2 (cEOSLab, EOS-4.32.2F-38195967.4322F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r2
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+vlan 125
+ name tfs-vlan-125
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet1
+ no switchport
+!
+interface Ethernet1.125
+ encapsulation dot1q vlan 125
+ ip address 10.254.172.70/30
+!
+interface Ethernet3
+ no switchport
+!
+interface Ethernet3.125
+ encapsulation dot1q vlan 125
+ ip address 10.254.187.117/30
+!
+interface Management0
+ ip address 172.20.20.102/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+ip route 172.17.1.0/24 10.254.172.69 metric 1
+ip route 172.17.3.0/24 10.254.187.118 metric 1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/osm_end2end/clab/target_config_tagged/r3-startup.cfg b/src/tests/osm_end2end/clab/target_config_tagged/r3-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..65c9ed7f66af2a31a32a6be15409490a2cad7ab3
--- /dev/null
+++ b/src/tests/osm_end2end/clab/target_config_tagged/r3-startup.cfg
@@ -0,0 +1,64 @@
+! device: r3 (cEOSLab, EOS-4.32.2F-38195967.4322F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r3
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+vlan 125
+ name tfs-vlan-125
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet2
+ no switchport
+!
+interface Ethernet2.125
+ encapsulation dot1q vlan 125
+ ip address 10.254.187.118/30
+!
+interface Ethernet10
+!
+interface Ethernet11
+ no switchport
+!
+interface Ethernet11.125
+ encapsulation dot1q vlan 125
+ ip address 172.17.3.1/24
+!
+interface Management0
+ ip address 172.20.20.103/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+ip route 172.17.1.0/24 10.254.187.117 metric 1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/osm_end2end/clab/target_config_untagged/r1-startup.cfg b/src/tests/osm_end2end/clab/target_config_untagged/r1-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..16a0b0de56935b0852ddf03ba431e17e1073aa4c
--- /dev/null
+++ b/src/tests/osm_end2end/clab/target_config_untagged/r1-startup.cfg
@@ -0,0 +1,55 @@
+! device: r1 (cEOSLab, EOS-4.32.2F-38195967.4322F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r1
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet2
+ no switchport
+ ip address 10.254.172.69/30
+!
+interface Ethernet10
+ no switchport
+ ip address 172.16.1.1/24
+!
+interface Ethernet11
+!
+interface Management0
+ ip address 172.20.20.101/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+ip route 172.16.3.0/24 10.254.172.70 metric 1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/osm_end2end/clab/target_config_untagged/r2-startup.cfg b/src/tests/osm_end2end/clab/target_config_untagged/r2-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..3e59ef041bd327bcbfee3697b7531a08978333f2
--- /dev/null
+++ b/src/tests/osm_end2end/clab/target_config_untagged/r2-startup.cfg
@@ -0,0 +1,54 @@
+! device: r2 (cEOSLab, EOS-4.32.2F-38195967.4322F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r2
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet1
+ no switchport
+ ip address 10.254.172.70/30
+!
+interface Ethernet3
+ no switchport
+ ip address 10.254.187.117/30
+!
+interface Management0
+ ip address 172.20.20.102/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+ip route 172.16.1.0/24 10.254.172.69 metric 1
+ip route 172.16.3.0/24 10.254.187.118 metric 1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/osm_end2end/clab/target_config_untagged/r3-startup.cfg b/src/tests/osm_end2end/clab/target_config_untagged/r3-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..47f0bfe2941da4c887e22af5c907c7138be483df
--- /dev/null
+++ b/src/tests/osm_end2end/clab/target_config_untagged/r3-startup.cfg
@@ -0,0 +1,55 @@
+! device: r3 (cEOSLab, EOS-4.32.2F-38195967.4322F (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r3
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet2
+ no switchport
+ ip address 10.254.187.118/30
+!
+interface Ethernet10
+ no switchport
+ ip address 172.16.3.1/24
+!
+interface Ethernet11
+!
+interface Management0
+ ip address 172.20.20.103/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+ip route 172.16.1.0/24 10.254.187.117 metric 1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/osm_end2end/data/tfs-topology.json b/src/tests/osm_end2end/data/tfs-topology.json
new file mode 100644
index 0000000000000000000000000000000000000000..2bc60573f0bafb84126cb19a31c00f8d19c82abe
--- /dev/null
+++ b/src/tests/osm_end2end/data/tfs-topology.json
@@ -0,0 +1,178 @@
+{
+ "contexts": [
+ {"context_id": {"context_uuid": {"uuid": "admin"}}}
+ ],
+ "topologies": [
+ {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+ ],
+ "devices": [
+ {
+ "device_id": {"device_uuid": {"uuid": "dc1_untagged"}}, "device_type": "emu-datacenter",
+ "device_drivers": ["DEVICEDRIVER_UNDEFINED"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"}
+ ]}}}
+ ]}
+ },
+ {
+ "device_id": {"device_uuid": {"uuid": "dc2_untagged"}}, "device_type": "emu-datacenter",
+ "device_drivers": ["DEVICEDRIVER_UNDEFINED"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"}
+ ]}}}
+ ]}
+ },
+ {
+ "device_id": {"device_uuid": {"uuid": "dc3_tagged"}}, "device_type": "emu-datacenter",
+ "device_drivers": ["DEVICEDRIVER_UNDEFINED"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"}
+ ]}}}
+ ]}
+ },
+ {
+ "device_id": {"device_uuid": {"uuid": "dc4_tagged"}}, "device_type": "emu-datacenter",
+ "device_drivers": ["DEVICEDRIVER_UNDEFINED"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"}
+ ]}}}
+ ]}
+ },
+ {
+ "device_id": {"device_uuid": {"uuid": "r1"}}, "device_type": "packet-router",
+ "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.101"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+ "username": "admin", "password": "admin", "use_tls": false
+ }}}
+ ]}
+ },
+ {
+ "device_id": {"device_uuid": {"uuid": "r2"}}, "device_type": "packet-router",
+ "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.102"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+ "username": "admin", "password": "admin", "use_tls": false
+ }}}
+ ]}
+ },
+ {
+ "device_id": {"device_uuid": {"uuid": "r3"}}, "device_type": "packet-router",
+ "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.103"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+ "username": "admin", "password": "admin", "use_tls": false
+ }}}
+ ]}
+ }
+ ],
+ "links": [
+ {
+ "link_id": {"link_uuid": {"uuid": "r1/Ethernet2==r2/Ethernet1"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}},
+ {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}}
+ ]
+ },
+ {
+ "link_id": {"link_uuid": {"uuid": "r2/Ethernet1==r1/Ethernet2"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}},
+ {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}
+ ]
+ },
+
+ {
+ "link_id": {"link_uuid": {"uuid": "r2/Ethernet3==r3/Ethernet2"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet3"}},
+ {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}
+ ]
+ },
+ {
+ "link_id": {"link_uuid": {"uuid": "r3/Ethernet2==r2/Ethernet3"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet2"}},
+ {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet3"}}
+ ]
+ },
+
+ {
+ "link_id": {"link_uuid": {"uuid": "r1/Ethernet10==dc1_untagged/eth1"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}},
+ {"device_id": {"device_uuid": {"uuid": "dc1_untagged"}}, "endpoint_uuid": {"uuid": "eth1"}}
+ ]
+ },
+ {
+ "link_id": {"link_uuid": {"uuid": "dc1_untagged/eth1==r1/Ethernet10"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "dc1_untagged"}}, "endpoint_uuid": {"uuid": "eth1"}},
+ {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}
+ ]
+ },
+
+ {
+ "link_id": {"link_uuid": {"uuid": "r3/Ethernet10==dc2_untagged/eth1"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet10"}},
+ {"device_id": {"device_uuid": {"uuid": "dc2_untagged"}}, "endpoint_uuid": {"uuid": "eth1"}}
+ ]
+ },
+ {
+ "link_id": {"link_uuid": {"uuid": "dc2_untagged/eth1==r3/Ethernet10"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "dc2_untagged"}}, "endpoint_uuid": {"uuid": "eth1"}},
+ {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}
+ ]
+ },
+
+ {
+ "link_id": {"link_uuid": {"uuid": "r1/Ethernet11==dc3_tagged/eth1"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet11"}},
+ {"device_id": {"device_uuid": {"uuid": "dc3_tagged"}}, "endpoint_uuid": {"uuid": "eth1"}}
+ ]
+ },
+ {
+ "link_id": {"link_uuid": {"uuid": "dc3_tagged/eth1==r1/Ethernet11"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "dc3_tagged"}}, "endpoint_uuid": {"uuid": "eth1"}},
+ {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet11"}}
+ ]
+ },
+
+ {
+ "link_id": {"link_uuid": {"uuid": "r3/Ethernet11==dc4_tagged/eth1"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet11"}},
+ {"device_id": {"device_uuid": {"uuid": "dc4_tagged"}}, "endpoint_uuid": {"uuid": "eth1"}}
+ ]
+ },
+ {
+ "link_id": {"link_uuid": {"uuid": "dc4_tagged/eth1==r3/Ethernet11"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "dc4_tagged"}}, "endpoint_uuid": {"uuid": "eth1"}},
+ {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet11"}}
+ ]
+ }
+ ]
+}
diff --git a/src/tests/osm_end2end/deploy-scripts/clab-cli-dc1.sh b/src/tests/osm_end2end/deploy-scripts/clab-cli-dc1.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d58a19b199564a852c2de33bf602f29d680026d9
--- /dev/null
+++ b/src/tests/osm_end2end/deploy-scripts/clab-cli-dc1.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-osm_end2end-dc1_untagged bash
diff --git a/src/tests/osm_end2end/deploy-scripts/clab-cli-dc2.sh b/src/tests/osm_end2end/deploy-scripts/clab-cli-dc2.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ec8e33c6c5a07f5ba9cfdadd81912fd3ad8921a7
--- /dev/null
+++ b/src/tests/osm_end2end/deploy-scripts/clab-cli-dc2.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-osm_end2end-dc2_untagged bash
diff --git a/src/tests/osm_end2end/deploy-scripts/clab-cli-dc3.sh b/src/tests/osm_end2end/deploy-scripts/clab-cli-dc3.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7f24a6c5a98eda968846d61e3aaa985241471b4c
--- /dev/null
+++ b/src/tests/osm_end2end/deploy-scripts/clab-cli-dc3.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-osm_end2end-dc3_tagged bash
diff --git a/src/tests/osm_end2end/deploy-scripts/clab-cli-dc4.sh b/src/tests/osm_end2end/deploy-scripts/clab-cli-dc4.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d885d28c9edac0e3f9d13eea725ab3559ad0232c
--- /dev/null
+++ b/src/tests/osm_end2end/deploy-scripts/clab-cli-dc4.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-osm_end2end-dc4_tagged bash
diff --git a/src/tests/osm_end2end/deploy-scripts/clab-cli-r1.sh b/src/tests/osm_end2end/deploy-scripts/clab-cli-r1.sh
new file mode 100755
index 0000000000000000000000000000000000000000..56ba2d32792cd443239e4b2bf4d4de5a3ad8abbb
--- /dev/null
+++ b/src/tests/osm_end2end/deploy-scripts/clab-cli-r1.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-osm_end2end-r1 Cli
diff --git a/src/tests/osm_end2end/deploy-scripts/clab-cli-r2.sh b/src/tests/osm_end2end/deploy-scripts/clab-cli-r2.sh
new file mode 100755
index 0000000000000000000000000000000000000000..66d92d77f09569d8a4e3a12b1baefe761d24035e
--- /dev/null
+++ b/src/tests/osm_end2end/deploy-scripts/clab-cli-r2.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-osm_end2end-r2 Cli
diff --git a/src/tests/osm_end2end/deploy-scripts/clab-cli-r3.sh b/src/tests/osm_end2end/deploy-scripts/clab-cli-r3.sh
new file mode 100755
index 0000000000000000000000000000000000000000..38612f73d4eb5d57daf09967e90cf0f89cbd4234
--- /dev/null
+++ b/src/tests/osm_end2end/deploy-scripts/clab-cli-r3.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-osm_end2end-r3 Cli
diff --git a/src/tests/osm_end2end/deploy-scripts/clab-deploy.sh b/src/tests/osm_end2end/deploy-scripts/clab-deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..fb50064c5bb7bdca873e6fe182853582117e9ac7
--- /dev/null
+++ b/src/tests/osm_end2end/deploy-scripts/clab-deploy.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/src/tests/osm_end2end
+sudo containerlab deploy --topo clab/osm_end2end.clab.yml
diff --git a/src/tests/osm_end2end/deploy-scripts/clab-destroy.sh b/src/tests/osm_end2end/deploy-scripts/clab-destroy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1718f3949e11ab091cbdab00f217123fe2c2b493
--- /dev/null
+++ b/src/tests/osm_end2end/deploy-scripts/clab-destroy.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/src/tests/osm_end2end
+sudo containerlab destroy --topo clab/osm_end2end.clab.yml
+sudo rm -rf clab/clab-osm_end2end/ clab/.osm_end2end.clab.yml.bak
diff --git a/src/tests/osm_end2end/deploy-scripts/clab-inspect.sh b/src/tests/osm_end2end/deploy-scripts/clab-inspect.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d27a044c0a54a6543eaed39a82b635e6193de289
--- /dev/null
+++ b/src/tests/osm_end2end/deploy-scripts/clab-inspect.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/src/tests/osm_end2end
+sudo containerlab inspect --topo clab/osm_end2end.clab.yml
diff --git a/src/tests/osm_end2end/deploy_specs.sh b/src/tests/osm_end2end/deploy_specs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..72cd25b58a02f442838bab866cc969680c073ebc
--- /dev/null
+++ b/src/tests/osm_end2end/deploy_specs.sh
@@ -0,0 +1,208 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+#export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service nbi"
+
+# Uncomment to activate Monitoring (old)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Monitoring Framework (new)
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation"
+
+# Uncomment to activate QoS Profiles
+#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile"
+
+# Uncomment to activate BGP-LS Speaker
+#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
+
+# Uncomment to activate Optical Controller
+# To manage optical connections, "service" requires "opticalcontroller" to be deployed
+# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
+# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it.
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+# BEFORE="${TFS_COMPONENTS% service*}"
+# AFTER="${TFS_COMPONENTS#* service}"
+# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}"
+#fi
+
+# Uncomment to activate ZTP
+#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
+
+# Uncomment to activate Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
+
+# Uncomment to activate Forecaster
+#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster"
+
+# Uncomment to activate E2E Orchestrator
+#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator"
+
+# Uncomment to activate DLT and Interdomain
+#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt"
+#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then
+# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk"
+# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem"
+# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt"
+#fi
+
+# Uncomment to activate QKD App
+# To manage QKD Apps, "service" requires "qkd_app" to be deployed
+# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
+# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it.
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
+# BEFORE="${TFS_COMPONENTS% service*}"
+# AFTER="${TFS_COMPONENTS#* service}"
+# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
+#fi
+
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port the CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port the CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping the database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4222"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8222"
+
+# Set NATS installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/nats.sh for additional details
+export NATS_DEPLOY_MODE="single"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb"
+
+# Set the external port the QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8812"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9009"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9000"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping tables if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
+
+
+# ----- Apache Kafka -----------------------------------------------------------
+
+# Set the namespace where Apache Kafka will be deployed.
+export KFK_NAMESPACE="kafka"
+
+# Set the port Apache Kafka server will be exposed to.
+export KFK_SERVER_PORT="9092"
+
+# Set the flag to YES to redeploy Apache Kafka from scratch.
+export KFK_REDEPLOY=""
diff --git a/src/tests/osm_end2end/gnmic_lab/README.md b/src/tests/osm_end2end/gnmic_lab/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..596b86b300875cf90216b9838a78cfce855c6a29
--- /dev/null
+++ b/src/tests/osm_end2end/gnmic_lab/README.md
@@ -0,0 +1,36 @@
+# OSM End-to-End gNMI Lab
+
+This folder contains an isolated Containerlab scenario and direct `gnmic`
+helpers to validate cEOS OpenConfig behavior without involving TeraFlowSDN.
+
+The objective is to determine which OpenConfig payloads correctly configure:
+
+- untagged routed access ports
+- tagged routed access ports
+- inter-router L3 links
+
+and to inspect the resulting EOS CLI configuration.
+
+The current verified findings are tracked in [REPORT.md](./REPORT.md).
+
+Typical workflow:
+
+```bash
+cd ~/tfs-ctrl
+src/tests/osm_end2end/gnmic_lab/run-lab.sh deploy
+src/tests/osm_end2end/gnmic_lab/run-lab.sh baseline
+src/tests/osm_end2end/gnmic_lab/run-lab.sh experiment-tagged-subif125
+src/tests/osm_end2end/gnmic_lab/run-lab.sh destroy
+```
+
+Results are written under `src/tests/osm_end2end/gnmic_lab/results/`.
+
+Useful experiment actions:
+
+- `experiment-untagged`
+- `experiment-tagged-subif125`
+- `experiment-tagged-subif0-and-125`
+- `experiment-tagged-subif0-then-125`
+- `experiment-tagged-subif0-vlan125`
+- `experiment-tagged-cli-baseline-and-capture`
+- `experiment-tagged-replace-inferred`
diff --git a/src/tests/osm_end2end/gnmic_lab/REPORT.md b/src/tests/osm_end2end/gnmic_lab/REPORT.md
new file mode 100644
index 0000000000000000000000000000000000000000..26fa0af9bfe7fa4b49f83e611ac2d3c9d09cc920
--- /dev/null
+++ b/src/tests/osm_end2end/gnmic_lab/REPORT.md
@@ -0,0 +1,65 @@
+# gNMI Lab Report
+
+This file tracks direct `gnmic` experiments against the isolated
+`osm_end2end_gnmic` Containerlab scenario.
+
+## Verified
+
+- Untagged routed access works with parent interface type
+ `iana-if-type:ethernetCsmacd` and IPv4 configured on `subinterface[0]`.
+ Resulting EOS CLI on `Ethernet10` is:
+ - `no switchport`
+ - `ip address 172.16.x.1/24`
+- Tagged access with parent interface type `iana-if-type:ethernetCsmacd` and
+ IPv4 plus VLAN 125 configured on `subinterface[125]` is accepted by cEOS,
+ but does not by itself turn the parent into `no switchport`.
+ Resulting EOS CLI on `Ethernet11` is:
+ - parent `Ethernet11` remains empty
+ - child `Ethernet11.125` is created with `encapsulation dot1q vlan 125`
+ and the correct IP address
+ Functional result:
+ - `dc3_tagged -> 172.17.1.1` fails with `Destination Host Unreachable`
+ - the failure is at the first hop, before routed transit matters
+- Tagged access with VLAN 125 configured on `subinterface[0]` is rejected by
+ cEOS as invalid.
+- When the parent `Ethernet11` is manually switched to routed mode with CLI
+ `no switchport`, the exact same tagged `subinterface[125]` setup works.
+ Functional result after also installing tagged transit routes on `r2`:
+ - `dc3_tagged -> 172.17.1.1` succeeds
+ - `dc3_tagged -> 172.17.3.10` succeeds end to end
+- On a fresh lab, the tagged case also works end to end when `Ethernet11` is
+ configured with a gNMI `REPLACE` of the full interface subtree containing:
+ - parent `config.type = iana-if-type:ethernetCsmacd`
+ - `subinterface[0]` with `ipv4.config.enabled = true`
+ - `subinterface[125]` with VLAN 125 match and IPv4 address
+ Functional result:
+ - `dc3_tagged -> 172.17.1.1` succeeds
+ - `dc3_tagged -> 172.17.3.10` succeeds end to end
+- The working tagged state, read back over gNMI, looks like:
+ - parent interface `config.type = iana-if-type:ethernetCsmacd`
+ - `openconfig-if-ethernet:ethernet` subtree present on the parent
+ - `subinterface[0]` present with IPv4 enabled
+ - `subinterface[125]` present with VLAN 125 match and IPv4 address
+
+## Current Conclusion
+
+- The routed core and static routes are not the blocker for the tagged case.
+- The tagged failure with the earlier direct gNMI tests is specifically that
+ an `UPDATE` of only `subinterface[125]` does not cause cEOS to convert
+ `Ethernet11` into a routed parent port.
+- TFS should not try to model the parent as `l3ipvlan` on cEOS. The working
+ parent type is still `iana-if-type:ethernetCsmacd`.
+- A pure OpenConfig solution does exist in this lab:
+ - gNMI `REPLACE` the full `Ethernet11` subtree
+ - include both `subinterface[0]` and `subinterface[125]`
+- The practical TFS implication is that the EOS tagged-access path likely needs
+ interface-subtree replacement semantics, not just incremental updates of the
+ VLAN subinterface.
+
+## Pending
+
+- Confirm whether `UPDATE` of the full subtree with both `subinterface[0]` and
+ `subinterface[125]` can also work, or whether `REPLACE` is strictly required
+ on cEOS.
+- Translate the successful `REPLACE` behavior into the TFS `gnmi_openconfig`
+ driver or YANG handler logic for tagged routed access interfaces.
diff --git a/src/tests/osm_end2end/gnmic_lab/osm_end2end_gnmic.clab.yml b/src/tests/osm_end2end/gnmic_lab/osm_end2end_gnmic.clab.yml
new file mode 100644
index 0000000000000000000000000000000000000000..88bb726c48bf9ce4d734d59d4092810e4c32d670
--- /dev/null
+++ b/src/tests/osm_end2end/gnmic_lab/osm_end2end_gnmic.clab.yml
@@ -0,0 +1,92 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: osm_end2end_gnmic
+
+# Dedicated management network; all router/host mgmt-ipv4 addresses below
+# (and the hard-coded addresses in run-lab.sh router_address) live here.
+mgmt:
+  network: mgmt-net
+  ipv4-subnet: 172.20.30.0/24
+
+topology:
+  kinds:
+    arista_ceos:
+      kind: arista_ceos
+      image: ceos:4.32.2F
+    linux:
+      kind: linux
+      image: ghcr.io/hellt/network-multitool:latest
+
+  nodes:
+    # r1/r3 are the edge routers (access + core), r2 is the core transit.
+    r1:
+      kind: arista_ceos
+      mgmt-ipv4: 172.20.30.101
+      startup-config: ../clab/r1-startup.cfg
+
+    r2:
+      kind: arista_ceos
+      mgmt-ipv4: 172.20.30.102
+      startup-config: ../clab/r2-startup.cfg
+
+    r3:
+      kind: arista_ceos
+      mgmt-ipv4: 172.20.30.103
+      startup-config: ../clab/r3-startup.cfg
+
+    # Untagged hosts: plain IPv4 on eth1, static route to the remote site.
+    dc1_untagged:
+      kind: linux
+      mgmt-ipv4: 172.20.30.201
+      exec:
+        - ip link set address 00:c1:ab:00:01:0a dev eth1
+        - ip link set eth1 up
+        - ip address add 172.16.1.10/24 dev eth1
+        - ip route add 172.16.3.0/24 via 172.16.1.1
+
+    dc2_untagged:
+      kind: linux
+      mgmt-ipv4: 172.20.30.202
+      exec:
+        - ip link set address 00:c1:ab:00:02:0a dev eth1
+        - ip link set eth1 up
+        - ip address add 172.16.3.10/24 dev eth1
+        - ip route add 172.16.1.0/24 via 172.16.3.1
+
+    # Tagged hosts: traffic is carried on VLAN 125 subinterface eth1.125.
+    dc3_tagged:
+      kind: linux
+      mgmt-ipv4: 172.20.30.203
+      exec:
+        - ip link set address 00:c1:ab:00:03:0a dev eth1
+        - ip link set eth1 up
+        - ip link add link eth1 name eth1.125 type vlan id 125
+        - ip address add 172.17.1.10/24 dev eth1.125
+        - ip link set eth1.125 up
+        - ip route add 172.17.3.0/24 via 172.17.1.1
+
+    dc4_tagged:
+      kind: linux
+      mgmt-ipv4: 172.20.30.204
+      exec:
+        - ip link set address 00:c1:ab:00:04:0a dev eth1
+        - ip link set eth1 up
+        - ip link add link eth1 name eth1.125 type vlan id 125
+        - ip address add 172.17.3.10/24 dev eth1.125
+        - ip link set eth1.125 up
+        - ip route add 172.17.1.0/24 via 172.17.3.1
+
+  links:
+    # Core: r1 -- r2 -- r3; access ports are eth10 (untagged) / eth11 (tagged).
+    - endpoints: ["r1:eth2", "r2:eth1"]
+    - endpoints: ["r2:eth3", "r3:eth2"]
+    - endpoints: ["r1:eth10", "dc1_untagged:eth1"]
+    - endpoints: ["r3:eth10", "dc2_untagged:eth1"]
+    - endpoints: ["r1:eth11", "dc3_tagged:eth1"]
+    - endpoints: ["r3:eth11", "dc4_tagged:eth1"]
diff --git a/src/tests/osm_end2end/gnmic_lab/run-lab.sh b/src/tests/osm_end2end/gnmic_lab/run-lab.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e3885be362695942414c7d950b45a899daddad5b
--- /dev/null
+++ b/src/tests/osm_end2end/gnmic_lab/run-lab.sh
@@ -0,0 +1,458 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+# Resolve paths relative to this script so it can be run from any cwd.
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+# NOTE(review): REPO_ROOT is currently unused in this script — confirm whether
+# it can be dropped or is kept for parity with run-local.sh.
+REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
+LAB_NAME="osm_end2end_gnmic"
+TOPO_FILE="${SCRIPT_DIR}/osm_end2end_gnmic.clab.yml"
+RESULTS_DIR="${SCRIPT_DIR}/results"
+
+# Tunables, overridable via environment: sudo usage for containerlab and the
+# credentials/port/encoding/timeout applied to every gnmic call.
+OSM_GNMIC_USE_SUDO="${OSM_GNMIC_USE_SUDO:-yes}"
+GNMIC_USER="${GNMIC_USER:-admin}"
+GNMIC_PASS="${GNMIC_PASS:-admin}"
+GNMIC_PORT="${GNMIC_PORT:-6030}"
+GNMIC_ENCODING="${GNMIC_ENCODING:-json_ietf}"
+GNMIC_TIMEOUT="${GNMIC_TIMEOUT:-30s}"
+
+mkdir -p "${RESULTS_DIR}"
+
+# Run containerlab, with sudo unless OSM_GNMIC_USE_SUDO=no.
+clab() {
+    if [[ "${OSM_GNMIC_USE_SUDO}" == "yes" ]]; then
+        sudo containerlab "$@"
+    else
+        containerlab "$@"
+    fi
+}
+
+# Run gnmic against the given target address with the shared credentials and
+# encoding; remaining arguments are passed through (e.g. `get`, `set`, ...).
+gnmi() {
+    local address=$1
+    shift
+    gnmic --address "${address}" --port "${GNMIC_PORT}" \
+        --username "${GNMIC_USER}" --password "${GNMIC_PASS}" \
+        --insecure --encoding "${GNMIC_ENCODING}" --timeout "${GNMIC_TIMEOUT}" "$@"
+}
+
+wait_for_router_ready() {
+ local router=$1
+ local address
+ address="$(router_address "${router}")"
+
+ echo "Waiting for ${router} gNMI readiness at ${address}..."
+ for _ in $(seq 1 60); do
+ if gnmi "${address}" capabilities >/dev/null 2>&1 && \
+ gnmi "${address}" get --path /system/state/hostname >/dev/null 2>&1; then
+ echo "${router} is ready."
+ return 0
+ fi
+ sleep 2
+ done
+
+ echo "Timed out waiting for ${router} gNMI readiness" >&2
+ return 1
+}
+
+gnmi_update_json() {
+ local address=$1
+ local path=$2
+ local json_value=$3
+ local payload_file
+ payload_file="$(mktemp)"
+ printf '%s\n' "${json_value}" > "${payload_file}"
+ gnmi "${address}" set --update-path "${path}" --update-file "${payload_file}"
+ rm -f "${payload_file}"
+}
+
+gnmi_replace_json() {
+ local address=$1
+ local path=$2
+ local json_value=$3
+ local payload_file
+ payload_file="$(mktemp)"
+ printf '%s\n' "${json_value}" > "${payload_file}"
+ gnmi "${address}" set --replace-path "${path}" --replace-file "${payload_file}"
+ rm -f "${payload_file}"
+}
+
+router_address() {
+ case "$1" in
+ r1) echo "172.20.30.101" ;;
+ r2) echo "172.20.30.102" ;;
+ r3) echo "172.20.30.103" ;;
+ *) echo "Unknown router: $1" >&2; exit 1 ;;
+ esac
+}
+
+# Dump a router's full running-config via FastCli at privilege level 15.
+show_run() {
+    local router=$1
+    docker exec "clab-${LAB_NAME}-${router}" \
+        bash -lc 'FastCli -p 15 -c "show running-config"'
+}
+
+# Dump the running-config of a space-separated list of interfaces only.
+show_run_interfaces() {
+    local router=$1
+    local iface_list=$2
+    docker exec "clab-${LAB_NAME}-${router}" \
+        bash -lc "FastCli -p 15 -c 'show running-config interfaces ${iface_list}'"
+}
+
+# Apply raw CLI configuration. `commands` is a backslash-n separated command
+# string; printf '%b' expands the escapes before piping into the EOS Cli.
+cli_config() {
+    local router=$1
+    local commands=$2
+    docker exec "clab-${LAB_NAME}-${router}" \
+        bash -lc "printf '%b' \"configure terminal\n${commands}\nend\n\" | Cli -p 15"
+}
+
+# 3-packet ping from a lab host container, emitted as containerlab JSON.
+ping_node() {
+    local node=$1
+    local ip=$2
+    clab exec --name "${LAB_NAME}" --label "clab-node-name=${node}" \
+        --cmd "ping -n -c3 ${ip}" --format json
+}
+
+deploy() {
+ clab destroy --cleanup --topo "${TOPO_FILE}" || true
+ clab deploy --reconfigure --topo "${TOPO_FILE}"
+ wait_for_router_ready r1
+ wait_for_router_ready r2
+ wait_for_router_ready r3
+}
+
+destroy() {
+ clab destroy --cleanup --topo "${TOPO_FILE}" || true
+}
+
+baseline() {
+ show_run r1 | tee "${RESULTS_DIR}/r1-baseline.txt"
+ show_run r2 | tee "${RESULTS_DIR}/r2-baseline.txt"
+ show_run r3 | tee "${RESULTS_DIR}/r3-baseline.txt"
+ gnmi "$(router_address r1)" get --path /interfaces/interface --path /network-instances/network-instance \
+ > "${RESULTS_DIR}/r1-baseline-gnmi.json"
+ gnmi "$(router_address r2)" get --path /interfaces/interface --path /network-instances/network-instance \
+ > "${RESULTS_DIR}/r2-baseline-gnmi.json"
+ gnmi "$(router_address r3)" get --path /interfaces/interface --path /network-instances/network-instance \
+ > "${RESULTS_DIR}/r3-baseline-gnmi.json"
+}
+
+# Snapshot one router into ${RESULTS_DIR}/<label>/: full running-config, the
+# per-role relevant interfaces (CLI + gNMI), and the STATIC protocol subtree.
+capture_router_state() {
+    local label=$1
+    local router=$2
+    local address
+    local output_dir
+    local interface_args=()
+    local interface_show=""
+    address="$(router_address "${router}")"
+    output_dir="${RESULTS_DIR}/${label}"
+
+    mkdir -p "${output_dir}"
+    show_run "${router}" > "${output_dir}/${router}-show-run.txt"
+
+    # r1/r3 are edge routers (core uplink Eth2 + access ports Eth10/Eth11);
+    # r2 only has its two core links (Eth1/Eth3).
+    case "${router}" in
+        r1|r3)
+            interface_show="Ethernet2 Ethernet10 Ethernet11"
+            interface_args=(
+                --path "/interfaces/interface[name=Ethernet2]"
+                --path "/interfaces/interface[name=Ethernet10]"
+                --path "/interfaces/interface[name=Ethernet11]"
+            )
+            ;;
+        r2)
+            interface_show="Ethernet1 Ethernet3"
+            interface_args=(
+                --path "/interfaces/interface[name=Ethernet1]"
+                --path "/interfaces/interface[name=Ethernet3]"
+            )
+            ;;
+        *)
+            echo "Unsupported router for capture: ${router}" >&2
+            exit 1
+            ;;
+    esac
+
+    show_run_interfaces "${router}" "${interface_show}" \
+        > "${output_dir}/${router}-show-run-interfaces.txt"
+    gnmi "${address}" get \
+        "${interface_args[@]}" \
+        --path "/network-instances/network-instance[name=default]/protocols/protocol[identifier=STATIC][name=STATIC]" \
+        > "${output_dir}/${router}-state.json"
+}
+
+# Snapshot all three routers under the same label directory.
+capture_lab_state() {
+    local label=$1
+    capture_router_state "${label}" r1
+    capture_router_state "${label}" r2
+    capture_router_state "${label}" r3
+}
+
+# Configure one (or two) routed point-to-point core links on a router by
+# UPDATE-ing the whole interface with a subinterface[0] IPv4 address.
+# Downlink arguments are optional (r1/r3 only have one core link).
+configure_router_l3_links() {
+    local router=$1
+    local uplink=$2
+    local uplink_ip=$3
+    local uplink_prefix=$4
+    local downlink=${5:-}
+    local downlink_ip=${6:-}
+    local downlink_prefix=${7:-}
+
+    local address
+    address="$(router_address "${router}")"
+
+    gnmi_update_json "${address}" \
+        "/interfaces/interface[name=${uplink}]" \
+        "{\"name\":\"${uplink}\",\"config\":{\"name\":\"${uplink}\",\"type\":\"iana-if-type:ethernetCsmacd\",\"enabled\":true},\"subinterfaces\":{\"subinterface\":[{\"index\":0,\"config\":{\"index\":0,\"enabled\":true},\"openconfig-if-ip:ipv4\":{\"config\":{\"enabled\":true},\"addresses\":{\"address\":[{\"ip\":\"${uplink_ip}\",\"config\":{\"ip\":\"${uplink_ip}\",\"prefix-length\":${uplink_prefix}}}]}}}]}}"
+
+    if [[ -n "${downlink}" ]]; then
+        gnmi_update_json "${address}" \
+            "/interfaces/interface[name=${downlink}]" \
+            "{\"name\":\"${downlink}\",\"config\":{\"name\":\"${downlink}\",\"type\":\"iana-if-type:ethernetCsmacd\",\"enabled\":true},\"subinterfaces\":{\"subinterface\":[{\"index\":0,\"config\":{\"index\":0,\"enabled\":true},\"openconfig-if-ip:ipv4\":{\"config\":{\"enabled\":true},\"addresses\":{\"address\":[{\"ip\":\"${downlink_ip}\",\"config\":{\"ip\":\"${downlink_ip}\",\"prefix-length\":${downlink_prefix}}}]}}}]}}"
+    fi
+}
+
+# Install one static route (prefix via next_hop) under the default network
+# instance's STATIC protocol; `index` is the OpenConfig next-hop list key.
+configure_static_route() {
+    local router=$1
+    local prefix=$2
+    local next_hop=$3
+    local index=$4
+    local address
+    address="$(router_address "${router}")"
+
+    gnmi_update_json "${address}" \
+        "/network-instances/network-instance[name=default]/protocols/protocol[identifier=STATIC][name=STATIC]" \
+        "{\"identifier\":\"openconfig-policy-types:STATIC\",\"name\":\"STATIC\",\"config\":{\"identifier\":\"openconfig-policy-types:STATIC\",\"name\":\"STATIC\",\"enabled\":true},\"static-routes\":{\"static\":[{\"prefix\":\"${prefix}\",\"config\":{\"prefix\":\"${prefix}\"},\"next-hops\":{\"next-hop\":[{\"index\":\"${index}\",\"config\":{\"index\":\"${index}\",\"next-hop\":\"${next_hop}\",\"metric\":1}}]}}]}}"
+}
+
+# Untagged access port: UPDATE the interface with an addressed, IPv4-enabled
+# subinterface[0] (the working untagged-case shape from the notes above).
+configure_access_subif0() {
+    local router=$1
+    local iface=$2
+    local ip=$3
+    local prefix=$4
+    local address
+    address="$(router_address "${router}")"
+
+    gnmi_update_json "${address}" \
+        "/interfaces/interface[name=${iface}]" \
+        "{\"name\":\"${iface}\",\"config\":{\"name\":\"${iface}\",\"type\":\"iana-if-type:ethernetCsmacd\",\"enabled\":true},\"subinterfaces\":{\"subinterface\":[{\"index\":0,\"config\":{\"index\":0,\"enabled\":true},\"openconfig-if-ip:ipv4\":{\"config\":{\"enabled\":true},\"addresses\":{\"address\":[{\"ip\":\"${ip}\",\"config\":{\"ip\":\"${ip}\",\"prefix-length\":${prefix}}}]}}}]}}"
+}
+
+# Same as above but subinterface[0] carries no address — used to probe whether
+# an empty routed subif[0] alone flips the parent into routed mode.
+configure_access_subif0_empty() {
+    local router=$1
+    local iface=$2
+    local address
+    address="$(router_address "${router}")"
+
+    gnmi_update_json "${address}" \
+        "/interfaces/interface[name=${iface}]" \
+        "{\"name\":\"${iface}\",\"config\":{\"name\":\"${iface}\",\"type\":\"iana-if-type:ethernetCsmacd\",\"enabled\":true},\"subinterfaces\":{\"subinterface\":[{\"index\":0,\"config\":{\"index\":0,\"enabled\":true},\"openconfig-if-ip:ipv4\":{\"config\":{\"enabled\":true}}}]}}"
+}
+
+# Tagged access, UPDATE of the VLAN subinterface only: subinterface[vlan_id]
+# with a single-tagged VLAN match and an IPv4 address. Per the notes above,
+# this alone does NOT make cEOS convert the parent into a routed port.
+configure_access_subif125() {
+    local router=$1
+    local iface=$2
+    local ip=$3
+    local prefix=$4
+    local vlan_id=$5
+    local address
+    address="$(router_address "${router}")"
+
+    gnmi_update_json "${address}" \
+        "/interfaces/interface[name=${iface}]" \
+        "{\"name\":\"${iface}\",\"config\":{\"name\":\"${iface}\",\"type\":\"iana-if-type:ethernetCsmacd\",\"enabled\":true},\"subinterfaces\":{\"subinterface\":[{\"index\":${vlan_id},\"config\":{\"index\":${vlan_id},\"enabled\":true},\"openconfig-vlan:vlan\":{\"match\":{\"single-tagged\":{\"config\":{\"vlan-id\":${vlan_id}}}}},\"openconfig-if-ip:ipv4\":{\"config\":{\"enabled\":true},\"addresses\":{\"address\":[{\"ip\":\"${ip}\",\"config\":{\"ip\":\"${ip}\",\"prefix-length\":${prefix}}}]}}}]}}"
+}
+
+# Tagged access via UPDATE of a combined payload: empty routed subinterface[0]
+# plus the addressed VLAN subinterface[vlan_id] in a single Set.
+configure_access_subif0_and_125() {
+    local router=$1
+    local iface=$2
+    local ip=$3
+    local prefix=$4
+    local vlan_id=$5
+    local address
+    address="$(router_address "${router}")"
+
+    gnmi_update_json "${address}" \
+        "/interfaces/interface[name=${iface}]" \
+        "{\"name\":\"${iface}\",\"config\":{\"name\":\"${iface}\",\"type\":\"iana-if-type:ethernetCsmacd\",\"enabled\":true},\"subinterfaces\":{\"subinterface\":[{\"index\":0,\"config\":{\"index\":0,\"enabled\":true},\"openconfig-if-ip:ipv4\":{\"config\":{\"enabled\":true}}},{\"index\":${vlan_id},\"config\":{\"index\":${vlan_id},\"enabled\":true},\"openconfig-vlan:vlan\":{\"match\":{\"single-tagged\":{\"config\":{\"vlan-id\":${vlan_id}}}}},\"openconfig-if-ip:ipv4\":{\"config\":{\"enabled\":true},\"addresses\":{\"address\":[{\"ip\":\"${ip}\",\"config\":{\"ip\":\"${ip}\",\"prefix-length\":${prefix}}}]}}}]}}"
+}
+
+# Same combined payload as configure_access_subif0_and_125 but sent as a
+# REPLACE of the full interface subtree — the variant that works on cEOS.
+replace_access_subif0_and_125() {
+    local router=$1
+    local iface=$2
+    local ip=$3
+    local prefix=$4
+    local vlan_id=$5
+    local address
+    address="$(router_address "${router}")"
+
+    gnmi_replace_json "${address}" \
+        "/interfaces/interface[name=${iface}]" \
+        "{\"name\":\"${iface}\",\"config\":{\"name\":\"${iface}\",\"type\":\"iana-if-type:ethernetCsmacd\",\"enabled\":true},\"subinterfaces\":{\"subinterface\":[{\"index\":0,\"config\":{\"index\":0,\"enabled\":true},\"openconfig-if-ip:ipv4\":{\"config\":{\"enabled\":true}}},{\"index\":${vlan_id},\"config\":{\"index\":${vlan_id},\"enabled\":true},\"openconfig-vlan:vlan\":{\"match\":{\"single-tagged\":{\"config\":{\"vlan-id\":${vlan_id}}}}},\"openconfig-if-ip:ipv4\":{\"config\":{\"enabled\":true},\"addresses\":{\"address\":[{\"ip\":\"${ip}\",\"config\":{\"ip\":\"${ip}\",\"prefix-length\":${prefix}}}]}}}]}}"
+}
+
+# Experimental variant: put both the VLAN match and the IPv4 address on
+# subinterface[0] itself (index 0 with a single-tagged vlan_id match).
+configure_access_subif0_vlan125() {
+    local router=$1
+    local iface=$2
+    local ip=$3
+    local prefix=$4
+    local vlan_id=$5
+    local address
+    address="$(router_address "${router}")"
+
+    gnmi_update_json "${address}" \
+        "/interfaces/interface[name=${iface}]" \
+        "{\"name\":\"${iface}\",\"config\":{\"name\":\"${iface}\",\"type\":\"iana-if-type:ethernetCsmacd\",\"enabled\":true},\"subinterfaces\":{\"subinterface\":[{\"index\":0,\"config\":{\"index\":0,\"enabled\":true},\"openconfig-vlan:vlan\":{\"match\":{\"single-tagged\":{\"config\":{\"vlan-id\":${vlan_id}}}}},\"openconfig-if-ip:ipv4\":{\"config\":{\"enabled\":true},\"addresses\":{\"address\":[{\"ip\":\"${ip}\",\"config\":{\"ip\":\"${ip}\",\"prefix-length\":${prefix}}}]}}}]}}"
+}
+
+# Bring up the routed core (r1 <-> r2 <-> r3 /30 links) and the static routes
+# for the untagged 172.16.x.0/24 site prefixes.
+configure_core() {
+    configure_router_l3_links r1 Ethernet2 10.254.203.193 30
+    configure_router_l3_links r2 Ethernet1 10.254.203.194 30 Ethernet3 10.254.218.241 30
+    configure_router_l3_links r3 Ethernet2 10.254.218.242 30
+
+    configure_static_route r1 172.16.3.0/24 10.254.203.194 AUTO_1_10-254-203-194
+    configure_static_route r2 172.16.1.0/24 10.254.203.193 AUTO_1_10-254-203-193
+    configure_static_route r2 172.16.3.0/24 10.254.218.242 AUTO_1_10-254-218-242
+    configure_static_route r3 172.16.1.0/24 10.254.218.241 AUTO_1_10-254-218-241
+}
+
+# Add the static routes for the tagged 172.17.x.0/24 site prefixes (same next
+# hops as the core; distinct next-hop indices to avoid key collisions).
+configure_tagged_routes() {
+    configure_static_route r1 172.17.3.0/24 10.254.203.194 AUTO_1_10-254-203-194-tagged
+    configure_static_route r2 172.17.1.0/24 10.254.203.193 AUTO_1_10-254-203-193-tagged
+    configure_static_route r2 172.17.3.0/24 10.254.218.242 AUTO_1_10-254-218-242-tagged
+    configure_static_route r3 172.17.1.0/24 10.254.218.241 AUTO_1_10-254-218-241-tagged
+}
+
+# Untagged reference case: core + addressed subif[0] access; expected to pass.
+experiment_untagged() {
+    configure_core
+    configure_access_subif0 r1 Ethernet10 172.16.1.1 24
+    configure_access_subif0 r3 Ethernet10 172.16.3.1 24
+    show_run r1 | tee "${RESULTS_DIR}/r1-untagged.txt"
+    show_run r3 | tee "${RESULTS_DIR}/r3-untagged.txt"
+    ping_node dc1_untagged 172.16.1.1 | tee "${RESULTS_DIR}/ping-dc1-localgw-untagged.json"
+    ping_node dc1_untagged 172.16.3.1 | tee "${RESULTS_DIR}/ping-dc1-remotegw-untagged.json"
+    ping_node dc1_untagged 172.16.3.10 | tee "${RESULTS_DIR}/ping-dc1-remotehost-untagged.json"
+}
+
+# Tagged via UPDATE of subinterface[125] only (the known-failing shape).
+experiment_tagged_subif125() {
+    configure_core
+    configure_tagged_routes
+    configure_access_subif125 r1 Ethernet11 172.17.1.1 24 125
+    configure_access_subif125 r3 Ethernet11 172.17.3.1 24 125
+    show_run r1 | tee "${RESULTS_DIR}/r1-tagged-subif125.txt"
+    show_run r2 | tee "${RESULTS_DIR}/r2-tagged-subif125.txt"
+    show_run r3 | tee "${RESULTS_DIR}/r3-tagged-subif125.txt"
+    ping_node dc3_tagged 172.17.1.1 | tee "${RESULTS_DIR}/ping-dc3-localgw-subif125.json"
+    ping_node dc3_tagged 172.17.3.1 | tee "${RESULTS_DIR}/ping-dc3-remotegw-subif125.json"
+    ping_node dc3_tagged 172.17.3.10 | tee "${RESULTS_DIR}/ping-dc3-remotehost-subif125.json"
+}
+
+# Tagged via a single UPDATE carrying both subif[0] and subif[125].
+experiment_tagged_subif0_and_125() {
+    configure_core
+    configure_tagged_routes
+    configure_access_subif0_and_125 r1 Ethernet11 172.17.1.1 24 125
+    configure_access_subif0_and_125 r3 Ethernet11 172.17.3.1 24 125
+    show_run r1 | tee "${RESULTS_DIR}/r1-tagged-subif0-and-125.txt"
+    show_run r2 | tee "${RESULTS_DIR}/r2-tagged-subif0-and-125.txt"
+    show_run r3 | tee "${RESULTS_DIR}/r3-tagged-subif0-and-125.txt"
+    ping_node dc3_tagged 172.17.1.1 | tee "${RESULTS_DIR}/ping-dc3-localgw-subif0-and-125.json"
+    ping_node dc3_tagged 172.17.3.1 | tee "${RESULTS_DIR}/ping-dc3-remotegw-subif0-and-125.json"
+    ping_node dc3_tagged 172.17.3.10 | tee "${RESULTS_DIR}/ping-dc3-remotehost-subif0-and-125.json"
+}
+
+# Tagged via two sequential UPDATEs: empty subif[0] first, then subif[125].
+experiment_tagged_subif0_then_125() {
+    configure_core
+    configure_tagged_routes
+    configure_access_subif0_empty r1 Ethernet11
+    configure_access_subif0_empty r3 Ethernet11
+    configure_access_subif125 r1 Ethernet11 172.17.1.1 24 125
+    configure_access_subif125 r3 Ethernet11 172.17.3.1 24 125
+    show_run r1 | tee "${RESULTS_DIR}/r1-tagged-subif0-then-125.txt"
+    show_run r2 | tee "${RESULTS_DIR}/r2-tagged-subif0-then-125.txt"
+    show_run r3 | tee "${RESULTS_DIR}/r3-tagged-subif0-then-125.txt"
+    ping_node dc3_tagged 172.17.1.1 | tee "${RESULTS_DIR}/ping-dc3-localgw-subif0-then-125.json"
+    ping_node dc3_tagged 172.17.3.1 | tee "${RESULTS_DIR}/ping-dc3-remotegw-subif0-then-125.json"
+    ping_node dc3_tagged 172.17.3.10 | tee "${RESULTS_DIR}/ping-dc3-remotehost-subif0-then-125.json"
+}
+
+# Tagged via VLAN match + address directly on subinterface[0].
+experiment_tagged_subif0_vlan125() {
+    configure_core
+    configure_tagged_routes
+    configure_access_subif0_vlan125 r1 Ethernet11 172.17.1.1 24 125
+    configure_access_subif0_vlan125 r3 Ethernet11 172.17.3.1 24 125
+    show_run r1 | tee "${RESULTS_DIR}/r1-tagged-subif0-vlan125.txt"
+    show_run r2 | tee "${RESULTS_DIR}/r2-tagged-subif0-vlan125.txt"
+    show_run r3 | tee "${RESULTS_DIR}/r3-tagged-subif0-vlan125.txt"
+    ping_node dc3_tagged 172.17.1.1 | tee "${RESULTS_DIR}/ping-dc3-localgw-subif0-vlan125.json"
+    ping_node dc3_tagged 172.17.3.1 | tee "${RESULTS_DIR}/ping-dc3-remotegw-subif0-vlan125.json"
+    ping_node dc3_tagged 172.17.3.10 | tee "${RESULTS_DIR}/ping-dc3-remotehost-subif0-vlan125.json"
+}
+
+# Known-good tagged config applied through the EOS CLI (reference for what the
+# gNMI experiments should converge to).
+configure_cli_working_tagged_access() {
+    cli_config r1 \
+        "interface Ethernet11\nno switchport\ninterface Ethernet11.125\nencapsulation dot1q vlan 125\nip address 172.17.1.1/24"
+    cli_config r3 \
+        "interface Ethernet11\nno switchport\ninterface Ethernet11.125\nencapsulation dot1q vlan 125\nip address 172.17.3.1/24"
+}
+
+# Capture clean state, apply the working CLI config, capture again, and verify
+# end-to-end tagged connectivity; the two snapshots are diffed offline.
+experiment_tagged_cli_baseline_and_capture() {
+    capture_lab_state tagged-cli-clean
+    configure_core
+    configure_tagged_routes
+    configure_cli_working_tagged_access
+    capture_lab_state tagged-cli-working
+    ping_node dc3_tagged 172.17.1.1 | tee "${RESULTS_DIR}/ping-dc3-localgw-tagged-cli.json"
+    ping_node dc3_tagged 172.17.3.1 | tee "${RESULTS_DIR}/ping-dc3-remotegw-tagged-cli.json"
+    ping_node dc3_tagged 172.17.3.10 | tee "${RESULTS_DIR}/ping-dc3-remotehost-tagged-cli.json"
+}
+
+# Tagged via full-subtree gNMI REPLACE inferred from the CLI capture — the
+# variant the notes above report as working end to end.
+experiment_tagged_replace_inferred() {
+    configure_core
+    configure_tagged_routes
+    replace_access_subif0_and_125 r1 Ethernet11 172.17.1.1 24 125
+    replace_access_subif0_and_125 r3 Ethernet11 172.17.3.1 24 125
+    capture_lab_state tagged-replace-inferred
+    ping_node dc3_tagged 172.17.1.1 | tee "${RESULTS_DIR}/ping-dc3-localgw-tagged-replace.json"
+    ping_node dc3_tagged 172.17.3.1 | tee "${RESULTS_DIR}/ping-dc3-remotegw-tagged-replace.json"
+    ping_node dc3_tagged 172.17.3.10 | tee "${RESULTS_DIR}/ping-dc3-remotehost-tagged-replace.json"
+}
+
+# Dump a router's full OpenConfig interfaces subtree to the results dir
+# (and to stdout via tee) for offline inspection.
+get_interfaces() {
+    local router=$1
+    local address
+    address="$(router_address "${router}")"
+    gnmi "${address}" get --path /interfaces/interface \
+        | tee "${RESULTS_DIR}/${router}-interfaces.json"
+}
+
+# Entry point: dispatch a single action keyword to its handler; anything else
+# prints usage and fails.
+main() {
+    local action="${1:-}"
+    case "${action}" in
+        deploy) deploy ;;
+        destroy) destroy ;;
+        baseline) baseline ;;
+        capture-tagged-state) capture_lab_state tagged-snapshot ;;
+        experiment-untagged) experiment_untagged ;;
+        experiment-tagged-subif125) experiment_tagged_subif125 ;;
+        experiment-tagged-subif0-and-125) experiment_tagged_subif0_and_125 ;;
+        experiment-tagged-subif0-then-125) experiment_tagged_subif0_then_125 ;;
+        experiment-tagged-subif0-vlan125) experiment_tagged_subif0_vlan125 ;;
+        experiment-tagged-cli-baseline-and-capture) experiment_tagged_cli_baseline_and_capture ;;
+        experiment-tagged-replace-inferred) experiment_tagged_replace_inferred ;;
+        get-r1-interfaces) get_interfaces r1 ;;
+        get-r2-interfaces) get_interfaces r2 ;;
+        get-r3-interfaces) get_interfaces r3 ;;
+        *)
+            echo "Usage: $0 {deploy|destroy|baseline|capture-tagged-state|experiment-untagged|experiment-tagged-subif125|experiment-tagged-subif0-and-125|experiment-tagged-subif0-then-125|experiment-tagged-subif0-vlan125|experiment-tagged-cli-baseline-and-capture|experiment-tagged-replace-inferred|get-r1-interfaces|get-r2-interfaces|get-r3-interfaces}" >&2
+            exit 1
+            ;;
+    esac
+}
+
+main "$@"
diff --git a/src/tests/osm_end2end/redeploy-tfs.sh b/src/tests/osm_end2end/redeploy-tfs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d1e00f3e564781d3b635cbac9c867d45693433e6
--- /dev/null
+++ b/src/tests/osm_end2end/redeploy-tfs.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Resolve paths from this script's own location instead of a hard-coded
+# ~/tfs-ctrl checkout, and cd to the repository root so the relative
+# ./deploy/all.sh invocation works regardless of the caller's cwd.
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+source "${SCRIPT_DIR}/deploy_specs.sh"
+cd "${SCRIPT_DIR}/../../.."
+./deploy/all.sh
diff --git a/src/tests/osm_end2end/requirements.in b/src/tests/osm_end2end/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..5c92783a232a5bbe18b4dd6d0e6735e3ce8414c2
--- /dev/null
+++ b/src/tests/osm_end2end/requirements.in
@@ -0,0 +1,15 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+requests==2.27.*
diff --git a/src/tests/osm_end2end/run-local.sh b/src/tests/osm_end2end/run-local.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3e34e8f86782a0464bfec4f6f5495c5c56c484fd
--- /dev/null
+++ b/src/tests/osm_end2end/run-local.sh
@@ -0,0 +1,364 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+# Resolve paths relative to this script so it can be run from any cwd.
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
+TEST_NAME="osm_end2end"
+
+IMAGE_TAG="${OSM_E2E_IMAGE:-${TEST_NAME}:local}"
+RESULTS_DIR="${OSM_E2E_RESULTS_DIR:-${SCRIPT_DIR}/local_results}"
+COMPONENT_LOGS_DIR="${RESULTS_DIR}/component_logs"
+CLAB_TMP_DIR="${OSM_E2E_CLAB_TMPDIR:-/tmp/clab/${TEST_NAME}}"
+# Generated by the TFS deploy scripts; consumed by the dockerized tests.
+RUNTIME_ENV_FILE="${REPO_ROOT}/tfs_runtime_env_vars.sh"
+
+# yes/no switches and settle delays for the individual pipeline phases.
+OSM_E2E_CLEAN_START="${OSM_E2E_CLEAN_START:-yes}"
+OSM_E2E_DEPLOY_TFS="${OSM_E2E_DEPLOY_TFS:-yes}"
+OSM_E2E_BUILD_IMAGE="${OSM_E2E_BUILD_IMAGE:-yes}"
+OSM_E2E_CONTAINERLAB_USE_SUDO="${OSM_E2E_CONTAINERLAB_USE_SUDO:-yes}"
+OSM_E2E_WAIT_AFTER_CREATE="${OSM_E2E_WAIT_AFTER_CREATE:-60}"
+OSM_E2E_WAIT_AFTER_REMOVE="${OSM_E2E_WAIT_AFTER_REMOVE:-60}"
+
+# Split the override strings into argv arrays so multi-word commands
+# (e.g. "microk8s kubectl") work when invoked through the wrappers below.
+read -r -a KUBECTL_CMD_ARR <<< "${KUBECTL_CMD:-kubectl}"
+read -r -a HELM_CMD_ARR <<< "${HELM_CMD:-helm3}"
+read -r -a MICROK8S_CMD_ARR <<< "${MICROK8S_CMD:-microk8s}"
+
+# Phases to run come from the CLI; default to the full pipeline.
+PHASES=("$@")
+if [[ ${#PHASES[@]} -eq 0 ]]; then
+    PHASES=("all")
+fi
+
+# Print CLI usage and the environment knobs honoured by this script.
+usage() {
+    cat <<'EOF'
+Usage:
+  ./src/tests/osm_end2end/run-local.sh [all|prepare|build-image|deploy-clab|deploy-tfs|onboarding|untagged|tagged|logs|destroy-clab]
+
+Environment:
+  OSM_E2E_IMAGE=osm_end2end:local
+  OSM_E2E_RESULTS_DIR=src/tests/osm_end2end/local_results
+  OSM_E2E_CLEAN_START=yes|no
+  OSM_E2E_DEPLOY_TFS=yes|no
+  OSM_E2E_BUILD_IMAGE=yes|no
+  OSM_E2E_CONTAINERLAB_USE_SUDO=yes|no
+  KUBECTL_CMD="kubectl"
+  HELM_CMD="helm3"
+  MICROK8S_CMD="microk8s"
+EOF
+}
+
+# Fail fast (exit 1) if a required executable is not on PATH.
+require_cmd() {
+    local cmd=$1
+    command -v "${cmd}" >/dev/null 2>&1 || {
+        echo "Command not found: ${cmd}" >&2
+        exit 1
+    }
+}
+
+# Thin wrappers around the (possibly multi-word) kubectl/helm/microk8s
+# commands configured at the top of the script.
+kctl() {
+    "${KUBECTL_CMD_ARR[@]}" "$@"
+}
+
+hctl() {
+    "${HELM_CMD_ARR[@]}" "$@"
+}
+
+mctl() {
+    "${MICROK8S_CMD_ARR[@]}" "$@"
+}
+
+# Run containerlab, with sudo unless OSM_E2E_CONTAINERLAB_USE_SUDO=no.
+clab() {
+    if [[ "${OSM_E2E_CONTAINERLAB_USE_SUDO}" == "yes" ]]; then
+        sudo containerlab "$@"
+    else
+        containerlab "$@"
+    fi
+}
+
+# Destroy ALL containerlab labs on this host and prune stopped containers,
+# dangling images, and unused networks. Deliberately aggressive: intended for
+# a dedicated test machine, not a shared docker host.
+docker_cleanup_for_test() {
+    clab destroy --all --cleanup || true
+    docker container prune --force
+    docker image prune --force
+    docker network prune --force
+}
+
+# Remove leftovers of previous TFS runs: helm-uninstall any nats* releases,
+# delete the well-known TFS namespaces, and clear Failed pods cluster-wide.
+cleanup_k8s_for_test() {
+    local existing_namespaces
+    existing_namespaces="$(kctl get namespace -o jsonpath='{.items[*].metadata.name}')"
+    local old_nats_namespaces
+    old_nats_namespaces="$(echo "${existing_namespaces}" | tr ' ' '\n' | grep -E '^nats' || true)"
+    for ns in ${old_nats_namespaces}; do
+        # NATS is installed as a helm release named after its namespace.
+        if hctl status "${ns}" -n "${ns}" >/dev/null 2>&1; then
+            hctl uninstall "${ns}" -n "${ns}" || true
+        fi
+    done
+
+    local old_namespaces
+    old_namespaces="$(echo "${existing_namespaces}" | tr ' ' '\n' | grep -E '^(tfs|crdb|qdb|kafka|nats)$' || true)"
+    if [[ -n "${old_namespaces}" ]]; then
+        # Intentionally unquoted: namespaces are passed as separate arguments.
+        kctl delete namespace ${old_namespaces} || true
+    fi
+
+    # Delete Failed pods; xargs consumes (namespace, name) pairs two at a time.
+    kctl get pods --all-namespaces --no-headers --field-selector=status.phase=Failed \
+        -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name \
+        | xargs --no-run-if-empty --max-args=2 kctl delete pod --namespace || true
+}
+
+wait_for_k8s() {
+ mctl status --wait-ready
+ local loop_max_attempts=30
+ local loop_counter=0
+ while ! kctl get pods --all-namespaces >/dev/null 2>&1; do
+ printf "."
+ sleep 1
+ loop_counter=$((loop_counter + 1))
+ if [[ "${loop_counter}" -ge "${loop_max_attempts}" ]]; then
+ echo "Max attempts reached waiting for Kubernetes API." >&2
+ exit 1
+ fi
+ done
+ echo
+ kctl get pods --all-namespaces
+}
+
+wait_for_tfs_deployments() {
+ local namespace="${TFS_K8S_NAMESPACE:-tfs}"
+ local deployments=(
+ contextservice
+ deviceservice
+ pathcompservice
+ serviceservice
+ nbiservice
+ )
+ local deployment
+ for deployment in "${deployments[@]}"; do
+ kctl --namespace "${namespace}" rollout status "deployment/${deployment}" --timeout=300s
+ done
+}
+
+# Copy the clab topology and startup configs into a scratch dir so
+# containerlab's generated artifacts never pollute the repository.
+prepare_clab_topology() {
+    rm -rf "${CLAB_TMP_DIR}"
+    mkdir -p "${CLAB_TMP_DIR}"
+    cp -R "${SCRIPT_DIR}/clab/." "${CLAB_TMP_DIR}/"
+}
+
+# Build the test-runner image from the repo root build context.
+build_image() {
+    docker buildx build -t "${IMAGE_TAG}" -f "${SCRIPT_DIR}/Dockerfile" "${REPO_ROOT}"
+}
+
+deploy_clab() {
+    prepare_clab_topology
+    clab deploy --reconfigure --topo "${CLAB_TMP_DIR}/${TEST_NAME}.clab.yml"
+}
+
+# Best-effort teardown; skipped if the scratch topology was never created.
+destroy_clab() {
+    if [[ -f "${CLAB_TMP_DIR}/${TEST_NAME}.clab.yml" ]]; then
+        clab destroy --cleanup --topo "${CLAB_TMP_DIR}/${TEST_NAME}.clab.yml" || true
+    fi
+}
+
+# Dump all three routers' running-config under a human-readable label; the
+# label is slugified (spaces and slashes -> underscores) for the file name.
+dump_router_configs() {
+    local label=$1
+    local slug
+    slug="$(echo "${label}" | tr ' /' '__')"
+    {
+        echo "==== ${label} ===="
+        clab exec --name "${TEST_NAME}" --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'"show running-config\""
+        clab exec --name "${TEST_NAME}" --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'"show running-config\""
+        clab exec --name "${TEST_NAME}" --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'"show running-config\""
+    } | tee "${RESULTS_DIR}/router-config-${slug}.log"
+}
+
+# Collect the logs of each TFS component container into COMPONENT_LOGS_DIR;
+# failures (e.g. a component that never started) are tolerated.
+dump_component_logs() {
+    mkdir -p "${COMPONENT_LOGS_DIR}"
+    local namespace="${TFS_K8S_NAMESPACE:-tfs}"
+    local components=(
+        contextservice:server
+        deviceservice:server
+        pathcompservice:frontend
+        serviceservice:server
+        nbiservice:server
+    )
+    local entry deployment container
+    for entry in "${components[@]}"; do
+        deployment="${entry%%:*}"
+        container="${entry##*:}"
+        kctl --namespace "${namespace}" logs "deployment/${deployment}" -c "${container}" \
+            > "${COMPONENT_LOGS_DIR}/${deployment}.log" 2>&1 || true
+    done
+}
+
+# The dockerized tests need the env file the TFS deploy generates; fail with
+# a hint if TFS has not been deployed yet.
+require_runtime_env_file() {
+    if [[ ! -f "${RUNTIME_ENV_FILE}" ]]; then
+        echo "Runtime env file not found: ${RUNTIME_ENV_FILE}" >&2
+        echo "Deploy TFS first so tfs_runtime_env_vars.sh is generated." >&2
+        exit 1
+    fi
+}
+
+# Run the onboarding test suite inside the test image (host network so it can
+# reach the TFS NBI); results land in RESULTS_DIR on the host.
+run_onboarding() {
+    require_runtime_env_file
+    docker run -t --rm --name "${TEST_NAME}-onboarding" --network=host \
+        --volume "${RUNTIME_ENV_FILE}:/var/teraflow/tfs_runtime_env_vars.sh" \
+        --volume "${RESULTS_DIR}:/opt/results" \
+        "${IMAGE_TAG}" /var/teraflow/run-onboarding.sh
+}
+
+# Run one OSM service script inside the test image.
+# action: create|remove; variant: untagged|tagged (exported as
+# OSM_SERVICE_VARIANT for the script to pick up).
+run_osm_test() {
+    local action=$1
+    local variant=$2
+    require_runtime_env_file
+    docker run -t --rm --name "${TEST_NAME}-${variant}-${action}" --network=host \
+        --env OSM_SERVICE_VARIANT="${variant}" \
+        --volume "${RUNTIME_ENV_FILE}:/var/teraflow/tfs_runtime_env_vars.sh" \
+        --volume "${RESULTS_DIR}:/opt/results" \
+        "${IMAGE_TAG}" "/var/teraflow/run-osm-service-${action}.sh"
+}
+
+# Run a 3-packet ping from a lab node and grep the containerlab JSON output
+# for `pattern`. Echoes the raw output plus a PASSED/FAILED verdict line;
+# returns non-zero on FAILED so errexit aborts the phase.
+ping_check() {
+    local src=$1
+    local dst_ip=$2
+    local pattern=$3
+    local output
+    output="$(clab exec --name "${TEST_NAME}" --label clab-node-name="${src}" --cmd "ping -n -c3 ${dst_ip}" --format json)"
+    echo "${output}"
+    if ! echo "${output}" | grep -E "${pattern}" >/dev/null; then
+        echo "FAILED ${src}->${dst_ip}"
+        return 1
+    fi
+    echo "PASSED ${src}->${dst_ip}"
+    return 0
+}
+
+# Expect isolation: only the node's own address answers; the local gateway,
+# remote gateway and remote host all show 100% loss (optionally with errors).
+assert_no_connectivity() {
+    local src=$1
+    local local_ip=$2
+    local local_gw=$3
+    local remote_gw=$4
+    local remote_ip=$5
+    local loss_pattern="3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss"
+    ping_check "${src}" "${local_ip}" "3 packets transmitted, 3 received, 0% packet loss"
+    local target
+    for target in "${local_gw}" "${remote_gw}" "${remote_ip}"; do
+        ping_check "${src}" "${target}" "${loss_pattern}"
+    done
+}
+
+# Expect full end-to-end reachability: every target answers all 3 pings.
+assert_connectivity() {
+    local src=$1
+    local local_ip=$2
+    local local_gw=$3
+    local remote_gw=$4
+    local remote_ip=$5
+    local ok_pattern="3 packets transmitted, 3 received, 0% packet loss"
+    local target
+    for target in "${local_ip}" "${local_gw}" "${remote_gw}" "${remote_ip}"; do
+        ping_check "${src}" "${target}" "${ok_pattern}"
+    done
+}
+
+# Full create/verify/remove/verify cycle for one service variant:
+# no connectivity -> create via OSM -> connectivity -> remove -> no
+# connectivity again, capturing router configs after each transition.
+run_variant_cycle() {
+    local variant=$1
+    local src=$2
+    local local_ip=$3
+    local local_gw=$4
+    local remote_gw=$5
+    local remote_ip=$6
+
+    echo "==== Starting ${variant} OSM service cycle ===="
+    assert_no_connectivity "${src}" "${local_ip}" "${local_gw}" "${remote_gw}" "${remote_ip}"
+
+    run_osm_test create "${variant}"
+    # Give OSM/TFS time to push the configuration to the routers.
+    sleep "${OSM_E2E_WAIT_AFTER_CREATE}"
+    dump_router_configs "after configuring ${variant} OSM service"
+    assert_connectivity "${src}" "${local_ip}" "${local_gw}" "${remote_gw}" "${remote_ip}"
+
+    run_osm_test remove "${variant}"
+    sleep "${OSM_E2E_WAIT_AFTER_REMOVE}"
+    dump_router_configs "after removing ${variant} OSM service"
+    assert_no_connectivity "${src}" "${local_ip}" "${local_gw}" "${remote_gw}" "${remote_ip}"
+}
+
+# Deploy the TFS stack using the test's deploy_specs; run in a subshell so
+# the sourced environment does not leak into the rest of this script.
+deploy_tfs() {
+    (
+        cd "${REPO_ROOT}"
+        source "${SCRIPT_DIR}/deploy_specs.sh"
+        ./deploy/crdb.sh
+        ./deploy/nats.sh
+        ./deploy/kafka.sh
+        ./deploy/tfs.sh
+        ./deploy/show.sh
+    )
+    wait_for_tfs_deployments
+}
+
+# Verify tooling, optionally wipe docker/k8s leftovers, and wait for the
+# cluster API before any other phase runs.
+prepare() {
+    require_cmd docker
+    require_cmd containerlab
+    require_cmd "${KUBECTL_CMD_ARR[0]}"
+    require_cmd "${HELM_CMD_ARR[0]}"
+    require_cmd "${MICROK8S_CMD_ARR[0]}"
+    require_cmd yq
+    mkdir -p "${RESULTS_DIR}" "${COMPONENT_LOGS_DIR}"
+
+    if [[ "${OSM_E2E_CLEAN_START}" == "yes" ]]; then
+        docker_cleanup_for_test
+        wait_for_k8s
+        cleanup_k8s_for_test
+    fi
+
+    wait_for_k8s
+}
+
+# The default end-to-end pipeline: prepare, build, deploy lab + TFS, onboard,
+# then run both service-variant cycles and collect component logs.
+run_all() {
+    prepare
+    if [[ "${OSM_E2E_BUILD_IMAGE}" == "yes" ]]; then
+        build_image
+    fi
+    deploy_clab
+    sleep 3
+    dump_router_configs "before any configuration"
+    if [[ "${OSM_E2E_DEPLOY_TFS}" == "yes" ]]; then
+        deploy_tfs
+    fi
+    run_onboarding
+    dump_router_configs "after onboarding scenario"
+    run_variant_cycle "untagged" "dc1_untagged" "172.16.1.10" "172.16.1.1" "172.16.3.1" "172.16.3.10"
+    run_variant_cycle "tagged" "dc3_tagged" "172.17.1.10" "172.17.1.1" "172.17.3.1" "172.17.3.10"
+    dump_component_logs
+}
+
+# Dispatch each requested phase in order (PHASES was filled from the CLI,
+# defaulting to "all"); unknown phases print usage and abort.
+main() {
+    local phase
+    for phase in "${PHASES[@]}"; do
+        case "${phase}" in
+            all) run_all ;;
+            prepare) prepare ;;
+            build-image) build_image ;;
+            deploy-clab) deploy_clab ;;
+            deploy-tfs) deploy_tfs ;;
+            onboarding) run_onboarding ;;
+            untagged) run_variant_cycle "untagged" "dc1_untagged" "172.16.1.10" "172.16.1.1" "172.16.3.1" "172.16.3.10" ;;
+            tagged) run_variant_cycle "tagged" "dc3_tagged" "172.17.1.10" "172.17.1.1" "172.17.3.1" "172.17.3.10" ;;
+            logs) dump_component_logs ;;
+            destroy-clab) destroy_clab ;;
+            -h|--help|help) usage ;;
+            *)
+                echo "Unknown phase: ${phase}" >&2
+                usage
+                exit 1
+                ;;
+        esac
+    done
+}
+
+main
diff --git a/src/tests/osm_end2end/scripts/run-cleanup.sh b/src/tests/osm_end2end/scripts/run-cleanup.sh
new file mode 100755
index 0000000000000000000000000000000000000000..556495dd18d6504adb7cde508c4e3f370357e91e
--- /dev/null
+++ b/src/tests/osm_end2end/scripts/run-cleanup.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source /var/teraflow/tfs_runtime_env_vars.sh
+export PYTHONPATH=/var/teraflow
+pytest --verbose --log-level=INFO \
+ --junitxml=/opt/results/report_cleanup.xml \
+ /var/teraflow/tests/osm_end2end/tests/test_cleanup.py
diff --git a/src/tests/osm_end2end/scripts/run-onboarding.sh b/src/tests/osm_end2end/scripts/run-onboarding.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a68014e456a79bc3492bfc770b70f2b218da5d33
--- /dev/null
+++ b/src/tests/osm_end2end/scripts/run-onboarding.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source /var/teraflow/tfs_runtime_env_vars.sh
+export PYTHONPATH=/var/teraflow
+pytest --verbose --log-level=INFO \
+ --junitxml=/opt/results/report_onboarding.xml \
+ /var/teraflow/tests/osm_end2end/tests/test_onboarding.py
diff --git a/src/tests/osm_end2end/scripts/run-osm-service-create.sh b/src/tests/osm_end2end/scripts/run-osm-service-create.sh
new file mode 100755
index 0000000000000000000000000000000000000000..83e28b03ee408dfd7412cd88a47aaa23cf211374
--- /dev/null
+++ b/src/tests/osm_end2end/scripts/run-osm-service-create.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source /var/teraflow/tfs_runtime_env_vars.sh
+export PYTHONPATH=/var/teraflow
+OSM_SERVICE_VARIANT="${OSM_SERVICE_VARIANT:-tagged}"
+pytest --verbose --log-level=INFO \
+ --junitxml=/opt/results/report_osm_service_create_${OSM_SERVICE_VARIANT}.xml \
+ /var/teraflow/tests/osm_end2end/tests/test_osm_service_create.py
diff --git a/src/tests/osm_end2end/scripts/run-osm-service-remove.sh b/src/tests/osm_end2end/scripts/run-osm-service-remove.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a18f7d80fe10dce870d3a769abc6aafe193c5ed9
--- /dev/null
+++ b/src/tests/osm_end2end/scripts/run-osm-service-remove.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source /var/teraflow/tfs_runtime_env_vars.sh
+export PYTHONPATH=/var/teraflow
+OSM_SERVICE_VARIANT="${OSM_SERVICE_VARIANT:-tagged}"
+pytest --verbose --log-level=INFO \
+ --junitxml=/opt/results/report_osm_service_remove_${OSM_SERVICE_VARIANT}.xml \
+ /var/teraflow/tests/osm_end2end/tests/test_osm_service_remove.py
diff --git a/src/tests/osm_end2end/tests/Fixtures.py b/src/tests/osm_end2end/tests/Fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..fae8401bb83c4240ac25f84a3ae3f7adda0c67ea
--- /dev/null
+++ b/src/tests/osm_end2end/tests/Fixtures.py
@@ -0,0 +1,51 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_http
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from .MockOSM import MockOSM
+from .OSM_Constants import WIM_MAPPING
+
+NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI)
+NBI_PORT = get_service_port_http(ServiceNameEnum.NBI)
+NBI_USERNAME = 'admin'
+NBI_PASSWORD = 'admin'
+NBI_BASE_URL = ''
+
+@pytest.fixture(scope='session')
+def osm_wim() -> MockOSM:
+ wim_url = 'http://{:s}:{:d}'.format(NBI_ADDRESS, NBI_PORT)
+ return MockOSM(wim_url, WIM_MAPPING, NBI_USERNAME, NBI_PASSWORD)
+
+@pytest.fixture(scope='session')
+def context_client() -> ContextClient:
+ _client = ContextClient()
+ yield _client
+ _client.close()
+
+@pytest.fixture(scope='session')
+def device_client() -> DeviceClient:
+ _client = DeviceClient()
+ yield _client
+ _client.close()
+
+@pytest.fixture(scope='session')
+def service_client() -> ServiceClient:
+ _client = ServiceClient()
+ yield _client
+ _client.close()
diff --git a/src/tests/osm_end2end/tests/MockOSM.py b/src/tests/osm_end2end/tests/MockOSM.py
new file mode 100644
index 0000000000000000000000000000000000000000..2361b44b6ba5872f5490f3e030c8349b7c7f16ed
--- /dev/null
+++ b/src/tests/osm_end2end/tests/MockOSM.py
@@ -0,0 +1,62 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from .WimconnectorIETFL2VPN import WimconnectorIETFL2VPN
+
+LOGGER = logging.getLogger(__name__)
+
+class MockOSM:
+ def __init__(self, url, mapping, username, password):
+ wim = {'wim_url': url}
+ wim_account = {'user': username, 'password': password}
+ config = {'mapping_not_needed': False, 'service_endpoint_mapping': mapping}
+ self.wim = WimconnectorIETFL2VPN(wim, wim_account, config=config)
+ self.conn_info = {} # internal database emulating OSM storage provided to WIM Connectors
+
+ def create_connectivity_service(self, service_type, connection_points):
+ LOGGER.info('[create_connectivity_service] service_type={:s}'.format(str(service_type)))
+ LOGGER.info('[create_connectivity_service] connection_points={:s}'.format(str(connection_points)))
+ self.wim.check_credentials()
+ result = self.wim.create_connectivity_service(service_type, connection_points)
+ LOGGER.info('[create_connectivity_service] result={:s}'.format(str(result)))
+ service_uuid, conn_info = result
+ self.conn_info[service_uuid] = conn_info
+ return service_uuid
+
+ def get_connectivity_service_status(self, service_uuid):
+ LOGGER.info('[get_connectivity_service] service_uuid={:s}'.format(str(service_uuid)))
+ conn_info = self.conn_info.get(service_uuid)
+ if conn_info is None: raise Exception('ServiceId({:s}) not found'.format(str(service_uuid)))
+ LOGGER.info('[get_connectivity_service] conn_info={:s}'.format(str(conn_info)))
+ self.wim.check_credentials()
+ result = self.wim.get_connectivity_service_status(service_uuid, conn_info=conn_info)
+ LOGGER.info('[get_connectivity_service] result={:s}'.format(str(result)))
+ return result
+
+ def edit_connectivity_service(self, service_uuid, connection_points):
+ LOGGER.info('[edit_connectivity_service] service_uuid={:s}'.format(str(service_uuid)))
+ LOGGER.info('[edit_connectivity_service] connection_points={:s}'.format(str(connection_points)))
+ conn_info = self.conn_info.get(service_uuid)
+ if conn_info is None: raise Exception('ServiceId({:s}) not found'.format(str(service_uuid)))
+ LOGGER.info('[edit_connectivity_service] conn_info={:s}'.format(str(conn_info)))
+ self.wim.edit_connectivity_service(service_uuid, conn_info=conn_info, connection_points=connection_points)
+
+ def delete_connectivity_service(self, service_uuid):
+ LOGGER.info('[delete_connectivity_service] service_uuid={:s}'.format(str(service_uuid)))
+ conn_info = self.conn_info.get(service_uuid)
+ if conn_info is None: raise Exception('ServiceId({:s}) not found'.format(str(service_uuid)))
+ LOGGER.info('[delete_connectivity_service] conn_info={:s}'.format(str(conn_info)))
+ self.wim.check_credentials()
+ self.wim.delete_connectivity_service(service_uuid, conn_info=conn_info)
diff --git a/src/tests/osm_end2end/tests/OSM_Constants.py b/src/tests/osm_end2end/tests/OSM_Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..0445325b5629c6381e38c40e0f25137cb086eb39
--- /dev/null
+++ b/src/tests/osm_end2end/tests/OSM_Constants.py
@@ -0,0 +1,114 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+from typing import Dict, List, Optional
+
+SERVICE_VARIANT_UNTAGGED = 'untagged'
+SERVICE_VARIANT_TAGGED = 'tagged'
+DEFAULT_SERVICE_VARIANT = SERVICE_VARIANT_TAGGED
+
+
+def _connection_point(service_endpoint_id: str, vlan_id: Optional[int] = None) -> Dict:
+ connection_point = {
+ 'service_endpoint_id': service_endpoint_id,
+ 'service_endpoint_encapsulation_type': 'none',
+ }
+ if vlan_id is not None:
+ connection_point['service_endpoint_encapsulation_type'] = 'dot1q'
+ connection_point['service_endpoint_encapsulation_info'] = {'vlan': vlan_id}
+ return connection_point
+
+
+def get_service_variant() -> str:
+ service_variant = os.environ.get('OSM_SERVICE_VARIANT', DEFAULT_SERVICE_VARIANT)
+ service_variant = str(service_variant).strip().lower()
+ if service_variant not in {SERVICE_VARIANT_UNTAGGED, SERVICE_VARIANT_TAGGED}:
+ msg = 'Unsupported OSM service variant: {:s}'.format(str(service_variant))
+ raise ValueError(msg)
+ return service_variant
+
+
+def get_service_connection_points(service_variant: Optional[str] = None) -> List[Dict]:
+ service_variant = get_service_variant() if service_variant is None else service_variant
+ if service_variant == SERVICE_VARIANT_UNTAGGED:
+ return [
+ _connection_point('ep-untagged-1'),
+ _connection_point('ep-untagged-2'),
+ ]
+ if service_variant == SERVICE_VARIANT_TAGGED:
+ return [
+ _connection_point('ep-tagged-1', vlan_id=125),
+ _connection_point('ep-tagged-2', vlan_id=125),
+ ]
+ msg = 'Unsupported OSM service variant: {:s}'.format(str(service_variant))
+ raise ValueError(msg)
+
+
+# Ref: https://osm.etsi.org/wikipub/index.php/WIM
+WIM_MAPPING = [
+ {
+ 'device-id': 'dc1_untagged', # pop_switch_dpid
+ #'device_interface_id' : ??, # pop_switch_port
+ 'service_endpoint_id': 'ep-untagged-1', # wan_service_endpoint_id
+ 'service_mapping_info': { # wan_service_mapping_info, other extra info
+ 'bearer': {'bearer-reference': 'OSM-E2E:r1:Ethernet10'},
+ 'site-id': '1',
+ },
+ #'switch_dpid' : ??, # wan_switch_dpid
+ #'switch_port' : ??, # wan_switch_port
+ #'datacenter_id' : ??, # vim_account
+ },
+ {
+ 'device-id': 'dc2_untagged', # pop_switch_dpid
+ #'device_interface_id' : ??, # pop_switch_port
+ 'service_endpoint_id': 'ep-untagged-2', # wan_service_endpoint_id
+ 'service_mapping_info': { # wan_service_mapping_info, other extra info
+ 'bearer': {'bearer-reference': 'OSM-E2E:r3:Ethernet10'},
+ 'site-id': '2',
+ },
+ #'switch_dpid' : ??, # wan_switch_dpid
+ #'switch_port' : ??, # wan_switch_port
+ #'datacenter_id' : ??, # vim_account
+ },
+ {
+ 'device-id': 'dc3_tagged', # pop_switch_dpid
+ #'device_interface_id' : ??, # pop_switch_port
+ 'service_endpoint_id': 'ep-tagged-1', # wan_service_endpoint_id
+ 'service_mapping_info': { # wan_service_mapping_info, other extra info
+ 'bearer': {'bearer-reference': 'OSM-E2E:r1:Ethernet11'},
+ 'site-id': '1',
+ },
+ #'switch_dpid' : ??, # wan_switch_dpid
+ #'switch_port' : ??, # wan_switch_port
+ #'datacenter_id' : ??, # vim_account
+ },
+ {
+ 'device-id': 'dc4_tagged', # pop_switch_dpid
+ #'device_interface_id' : ??, # pop_switch_port
+ 'service_endpoint_id': 'ep-tagged-2', # wan_service_endpoint_id
+ 'service_mapping_info': { # wan_service_mapping_info, other extra info
+ 'bearer': {'bearer-reference': 'OSM-E2E:r3:Ethernet11'},
+ 'site-id': '2',
+ },
+ #'switch_dpid' : ??, # wan_switch_dpid
+ #'switch_port' : ??, # wan_switch_port
+ #'datacenter_id' : ??, # vim_account
+ },
+]
+
+SERVICE_TYPE = 'ELINE'
+SERVICE_VARIANT = get_service_variant()
+SERVICE_CONNECTION_POINTS = get_service_connection_points(SERVICE_VARIANT)
diff --git a/src/tests/osm_end2end/tests/WimconnectorIETFL2VPN.py b/src/tests/osm_end2end/tests/WimconnectorIETFL2VPN.py
new file mode 100644
index 0000000000000000000000000000000000000000..de940a7d2546885fe50dedbe8aa7d402730f6aa6
--- /dev/null
+++ b/src/tests/osm_end2end/tests/WimconnectorIETFL2VPN.py
@@ -0,0 +1,545 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 Telefonica
+# All Rights Reserved.
+#
+# Contributors: Oscar Gonzalez de Dios, Manuel Lopez Bravo, Guillermo Pajares Martin
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This work has been performed in the context of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 program.
+##
+"""The SDN/WIM connector is responsible for establishing wide area network
+connectivity.
+
+This SDN/WIM connector implements the standard IETF RFC 8466 "A YANG Data
+ Model for Layer 2 Virtual Private Network (L2VPN) Service Delivery"
+
+It receives the endpoints and the necessary details to request
+the Layer 2 service.
+"""
+import requests
+import uuid
+import logging
+import copy
+#from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
+from .sdnconn import SdnConnectorBase, SdnConnectorError
+
+"""Check layer where we move it"""
+
+
+class WimconnectorIETFL2VPN(SdnConnectorBase):
+ def __init__(self, wim, wim_account, config=None, logger=None):
+ """IETF L2VPN WIM connector
+
+ Arguments: (To be completed)
+ wim (dict): WIM record, as stored in the database
+ wim_account (dict): WIM account record, as stored in the database
+ """
+ self.logger = logging.getLogger("ro.sdn.ietfl2vpn")
+ super().__init__(wim, wim_account, config, logger)
+ self.headers = {"Content-Type": "application/json"}
+ self.mappings = {
+ m["service_endpoint_id"]: m for m in self.service_endpoint_mapping
+ }
+ self.user = wim_account.get("user")
+        self.passwd = wim_account.get("password")  # upstream OSM used the misspelled key "passwordd"; corrected to "password" here
+
+ if self.user and self.passwd is not None:
+ self.auth = (self.user, self.passwd)
+ else:
+ self.auth = None
+
+ self.logger.info("IETFL2VPN Connector Initialized.")
+
+ def check_credentials(self):
+ endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+ self.wim["wim_url"]
+ )
+
+ try:
+ response = requests.get(endpoint, auth=self.auth)
+ http_code = response.status_code
+ except requests.exceptions.RequestException as e:
+ raise SdnConnectorError(e.response, http_code=503)
+
+ if http_code != 200:
+ raise SdnConnectorError("Failed while authenticating", http_code=http_code)
+
+ self.logger.info("Credentials checked")
+
+ def get_connectivity_service_status(self, service_uuid, conn_info=None):
+        """Monitor the status of the connectivity service established
+
+ Arguments:
+ service_uuid: Connectivity service unique identifier
+
+ Returns:
+ Examples::
+ {'sdn_status': 'ACTIVE'}
+ {'sdn_status': 'INACTIVE'}
+ {'sdn_status': 'DOWN'}
+ {'sdn_status': 'ERROR'}
+ """
+ try:
+            self.logger.info("Sending get connectivity service status")
+ servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+ self.wim["wim_url"], service_uuid
+ )
+ response = requests.get(servicepoint, auth=self.auth)
+ self.logger.warning('response.status_code={:s}'.format(str(response.status_code)))
+ if response.status_code != requests.codes.ok:
+ raise SdnConnectorError(
+                    "Unable to obtain connectivity service status",
+ http_code=response.status_code,
+ )
+
+ service_status = {"sdn_status": "ACTIVE"}
+
+ return service_status
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request Timeout", http_code=408)
+
+ def search_mapp(self, connection_point):
+ id = connection_point["service_endpoint_id"]
+ if id not in self.mappings:
+ raise SdnConnectorError("Endpoint {} not located".format(str(id)))
+ else:
+ return self.mappings[id]
+
+ def create_connectivity_service(self, service_type, connection_points, **kwargs):
+        """Establish WAN connectivity between the endpoints
+
+ Arguments:
+ service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
+ ``L3``.
+ connection_points (list): each point corresponds to
+ an entry point from the DC to the transport network. One
+ connection point serves to identify the specific access and
+ some other service parameters, such as encapsulation type.
+ Represented by a dict as follows::
+
+ {
+ "service_endpoint_id": ..., (str[uuid])
+ "service_endpoint_encapsulation_type": ...,
+ (enum: none, dot1q, ...)
+ "service_endpoint_encapsulation_info": {
+ ... (dict)
+ "vlan": ..., (int, present if encapsulation is dot1q)
+ "vni": ... (int, present if encapsulation is vxlan),
+ "peers": [(ipv4_1), (ipv4_2)]
+ (present if encapsulation is vxlan)
+ }
+ }
+
+ The service endpoint ID should be previously informed to the WIM
+ engine in the RO when the WIM port mapping is registered.
+
+ Keyword Arguments:
+ bandwidth (int): value in kilobytes
+ latency (int): value in milliseconds
+
+ Other QoS might be passed as keyword arguments.
+
+ Returns:
+ tuple: ``(service_id, conn_info)`` containing:
+ - *service_uuid* (str): UUID of the established connectivity
+ service
+ - *conn_info* (dict or None): Information to be stored at the
+ database (or ``None``). This information will be provided to
+ the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
+ **MUST** be JSON/YAML-serializable (plain data structures).
+
+ Raises:
+ SdnConnectorException: In case of error.
+ """
+ SETTINGS = { # min_endpoints, max_endpoints, vpn_service_type
+ 'ELINE': (2, 2, 'vpws'), # Virtual Private Wire Service
+ 'ELAN' : (2, None, 'vpls'), # Virtual Private LAN Service
+ }
+ settings = SETTINGS.get(service_type)
+ if settings is None: raise NotImplementedError('Unsupported service_type({:s})'.format(str(service_type)))
+ min_endpoints, max_endpoints, vpn_service_type = settings
+
+ if max_endpoints is not None and len(connection_points) > max_endpoints:
+ msg = "Connections between more than {:d} endpoints are not supported for service_type {:s}"
+ raise SdnConnectorError(msg.format(max_endpoints, service_type))
+
+ if min_endpoints is not None and len(connection_points) < min_endpoints:
+ msg = "Connections must be of at least {:d} endpoints for service_type {:s}"
+ raise SdnConnectorError(msg.format(min_endpoints, service_type))
+
+ """First step, create the vpn service"""
+ uuid_l2vpn = str(uuid.uuid4())
+ vpn_service = {}
+ vpn_service["vpn-id"] = uuid_l2vpn
+ vpn_service["vpn-svc-type"] = vpn_service_type
+ vpn_service["svc-topo"] = "any-to-any"
+ vpn_service["customer-name"] = "osm"
+ vpn_service_list = []
+ vpn_service_list.append(vpn_service)
+ vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list}
+ response_service_creation = None
+ conn_info = []
+ self.logger.info("Sending vpn-service : {:s}".format(str(vpn_service_l)))
+
+ try:
+ endpoint_service_creation = (
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+ self.wim["wim_url"]
+ )
+ )
+ response_service_creation = requests.post(
+ endpoint_service_creation,
+ headers=self.headers,
+ json=vpn_service_l,
+ auth=self.auth,
+ )
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError(
+ "Request to create service Timeout", http_code=408
+ )
+
+ if response_service_creation.status_code == 409:
+ raise SdnConnectorError(
+ "Service already exists",
+ http_code=response_service_creation.status_code,
+ )
+ elif response_service_creation.status_code != requests.codes.created:
+ raise SdnConnectorError(
+ "Request to create service not accepted",
+ http_code=response_service_creation.status_code,
+ )
+
+ self.logger.info('connection_points = {:s}'.format(str(connection_points)))
+
+ # Check if protected paths are requested
+ extended_connection_points = []
+ for connection_point in connection_points:
+ extended_connection_points.append(connection_point)
+
+ connection_point_wan_info = self.search_mapp(connection_point)
+ service_mapping_info = connection_point_wan_info.get('service_mapping_info', {})
+ redundant_service_endpoint_ids = service_mapping_info.get('redundant')
+
+ if redundant_service_endpoint_ids is None: continue
+ if len(redundant_service_endpoint_ids) == 0: continue
+
+ for redundant_service_endpoint_id in redundant_service_endpoint_ids:
+ redundant_connection_point = copy.deepcopy(connection_point)
+ redundant_connection_point['service_endpoint_id'] = redundant_service_endpoint_id
+ extended_connection_points.append(redundant_connection_point)
+
+ self.logger.info('extended_connection_points = {:s}'.format(str(extended_connection_points)))
+
+ """Second step, create the connections and vpn attachments"""
+ for connection_point in extended_connection_points:
+ connection_point_wan_info = self.search_mapp(connection_point)
+ site_network_access = {}
+ connection = {}
+
+ if connection_point["service_endpoint_encapsulation_type"] != "none":
+ if (
+ connection_point["service_endpoint_encapsulation_type"]
+ == "dot1q"
+ ):
+ """The connection is a VLAN"""
+ connection["encapsulation-type"] = "dot1q-vlan-tagged"
+ tagged = {}
+ tagged_interf = {}
+ service_endpoint_encapsulation_info = connection_point[
+ "service_endpoint_encapsulation_info"
+ ]
+
+ if service_endpoint_encapsulation_info["vlan"] is None:
+ raise SdnConnectorError("VLAN must be provided")
+
+ tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[
+ "vlan"
+ ]
+ tagged["dot1q-vlan-tagged"] = tagged_interf
+ connection["tagged-interface"] = tagged
+ else:
+ raise NotImplementedError("Encapsulation type not implemented")
+
+ site_network_access["connection"] = connection
+ self.logger.info("Sending connection:{}".format(connection))
+ vpn_attach = {}
+ vpn_attach["vpn-id"] = uuid_l2vpn
+ vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role"
+ site_network_access["vpn-attachment"] = vpn_attach
+            self.logger.info("Sending vpn-attachment :{}".format(vpn_attach))
+ uuid_sna = str(uuid.uuid4())
+ site_network_access["network-access-id"] = uuid_sna
+ site_network_access["bearer"] = connection_point_wan_info[
+ "service_mapping_info"
+ ]["bearer"]
+
+ access_priority = connection_point_wan_info["service_mapping_info"].get("priority")
+ if access_priority is not None:
+ availability = {}
+ availability["access-priority"] = access_priority
+ availability["single-active"] = [None]
+ site_network_access["availability"] = availability
+
+ constraint = {}
+ constraint['constraint-type'] = 'end-to-end-diverse'
+ constraint['target'] = {'all-other-accesses': [None]}
+
+ access_diversity = {}
+ access_diversity['constraints'] = {'constraint': []}
+ access_diversity['constraints']['constraint'].append(constraint)
+ site_network_access["access-diversity"] = access_diversity
+
+ site_network_accesses = {}
+ site_network_access_list = []
+ site_network_access_list.append(site_network_access)
+ site_network_accesses[
+ "ietf-l2vpn-svc:site-network-access"
+ ] = site_network_access_list
+ conn_info_d = {}
+ conn_info_d["site"] = connection_point_wan_info["service_mapping_info"][
+ "site-id"
+ ]
+ conn_info_d["site-network-access-id"] = site_network_access[
+ "network-access-id"
+ ]
+ conn_info_d["mapping"] = None
+ conn_info.append(conn_info_d)
+
+ self.logger.info("Sending site_network_accesses : {:s}".format(str(site_network_accesses)))
+
+ try:
+ endpoint_site_network_access_creation = (
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/"
+ "sites/site={}/site-network-accesses/".format(
+ self.wim["wim_url"],
+ connection_point_wan_info["service_mapping_info"][
+ "site-id"
+ ],
+ )
+ )
+ response_endpoint_site_network_access_creation = requests.post(
+ endpoint_site_network_access_creation,
+ headers=self.headers,
+ json=site_network_accesses,
+ auth=self.auth,
+ )
+
+ if (
+ response_endpoint_site_network_access_creation.status_code
+ == 409
+ ):
+ self.delete_connectivity_service(vpn_service["vpn-id"])
+
+ raise SdnConnectorError(
+ "Site_Network_Access with ID '{}' already exists".format(
+ site_network_access["network-access-id"]
+ ),
+ http_code=response_endpoint_site_network_access_creation.status_code,
+ )
+ elif (
+ response_endpoint_site_network_access_creation.status_code
+ == 400
+ ):
+ self.delete_connectivity_service(vpn_service["vpn-id"])
+
+ raise SdnConnectorError(
+ "Site {} does not exist".format(
+ connection_point_wan_info["service_mapping_info"][
+ "site-id"
+ ]
+ ),
+ http_code=response_endpoint_site_network_access_creation.status_code,
+ )
+ elif (
+ response_endpoint_site_network_access_creation.status_code
+ != requests.codes.created
+ and response_endpoint_site_network_access_creation.status_code
+ != requests.codes.no_content
+ ):
+ self.delete_connectivity_service(vpn_service["vpn-id"])
+
+ raise SdnConnectorError(
+ "Request not accepted",
+ http_code=response_endpoint_site_network_access_creation.status_code,
+ )
+ except requests.exceptions.ConnectionError:
+ self.delete_connectivity_service(vpn_service["vpn-id"])
+
+ raise SdnConnectorError("Request Timeout", http_code=408)
+
+ return uuid_l2vpn, conn_info
+
+ def delete_connectivity_service(self, service_uuid, conn_info=None):
+ """Disconnect multi-site endpoints previously connected
+
+ This method should receive as the first argument the UUID generated by
+ the ``create_connectivity_service``
+ """
+ try:
+ self.logger.info("Sending delete")
+ servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+ self.wim["wim_url"], service_uuid
+ )
+ response = requests.delete(servicepoint, auth=self.auth)
+
+ if response.status_code != requests.codes.no_content:
+ raise SdnConnectorError(
+ "Error in the request", http_code=response.status_code
+ )
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request Timeout", http_code=408)
+
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
+ """Change an existing connectivity service, see
+ ``create_connectivity_service``"""
+ # sites = {"sites": {}}
+ # site_list = []
+ vpn_service = {}
+ vpn_service["svc-topo"] = "any-to-any"
+ counter = 0
+
+ for connection_point in connection_points:
+ site_network_access = {}
+ connection_point_wan_info = self.search_mapp(connection_point)
+ params_site = {}
+ params_site["site-id"] = connection_point_wan_info["service_mapping_info"][
+ "site-id"
+ ]
+ params_site["site-vpn-flavor"] = "site-vpn-flavor-single"
+ device_site = {}
+ device_site["device-id"] = connection_point_wan_info["device-id"]
+ params_site["devices"] = device_site
+ # network_access = {}
+ connection = {}
+
+ if connection_point["service_endpoint_encapsulation_type"] != "none":
+ if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
+ """The connection is a VLAN"""
+ connection["encapsulation-type"] = "dot1q-vlan-tagged"
+ tagged = {}
+ tagged_interf = {}
+ service_endpoint_encapsulation_info = connection_point[
+ "service_endpoint_encapsulation_info"
+ ]
+
+ if service_endpoint_encapsulation_info["vlan"] is None:
+ raise SdnConnectorError("VLAN must be provided")
+
+ tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[
+ "vlan"
+ ]
+ tagged["dot1q-vlan-tagged"] = tagged_interf
+ connection["tagged-interface"] = tagged
+ else:
+ raise NotImplementedError("Encapsulation type not implemented")
+
+ site_network_access["connection"] = connection
+ vpn_attach = {}
+ vpn_attach["vpn-id"] = service_uuid
+ vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role"
+ site_network_access["vpn-attachment"] = vpn_attach
+ uuid_sna = conn_info[counter]["site-network-access-id"]
+ site_network_access["network-access-id"] = uuid_sna
+ site_network_access["bearer"] = connection_point_wan_info[
+ "service_mapping_info"
+ ]["bearer"]
+ site_network_accesses = {}
+ site_network_access_list = []
+ site_network_access_list.append(site_network_access)
+ site_network_accesses[
+ "ietf-l2vpn-svc:site-network-access"
+ ] = site_network_access_list
+
+ try:
+ endpoint_site_network_access_edit = (
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/"
+ "sites/site={}/site-network-accesses/".format(
+ self.wim["wim_url"],
+ connection_point_wan_info["service_mapping_info"]["site-id"],
+ )
+ )
+ response_endpoint_site_network_access_creation = requests.put(
+ endpoint_site_network_access_edit,
+ headers=self.headers,
+ json=site_network_accesses,
+ auth=self.auth,
+ )
+
+ if response_endpoint_site_network_access_creation.status_code == 400:
+ raise SdnConnectorError(
+ "Service does not exist",
+ http_code=response_endpoint_site_network_access_creation.status_code,
+ )
+ elif (
+ response_endpoint_site_network_access_creation.status_code != 201
+ and response_endpoint_site_network_access_creation.status_code
+ != 204
+ ):
+ raise SdnConnectorError(
+                        "Request not accepted",
+ http_code=response_endpoint_site_network_access_creation.status_code,
+ )
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request Timeout", http_code=408)
+
+ counter += 1
+
+ return None
+
+ def clear_all_connectivity_services(self):
+ """Delete all WAN Links corresponding to a WIM"""
+ try:
+ self.logger.info("Sending clear all connectivity services")
+ servicepoint = (
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+ self.wim["wim_url"]
+ )
+ )
+ response = requests.delete(servicepoint, auth=self.auth)
+
+ if response.status_code != requests.codes.no_content:
+ raise SdnConnectorError(
+ "Unable to clear all connectivity services",
+ http_code=response.status_code,
+ )
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request Timeout", http_code=408)
+
+ def get_all_active_connectivity_services(self):
+ """Provide information about all active connections provisioned by a
+ WIM
+ """
+ try:
+ self.logger.info("Sending get all connectivity services")
+ servicepoint = (
+ "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+ self.wim["wim_url"]
+ )
+ )
+ response = requests.get(servicepoint, auth=self.auth)
+
+ if response.status_code != requests.codes.ok:
+ raise SdnConnectorError(
+ "Unable to get all connectivity services",
+ http_code=response.status_code,
+ )
+
+ return response
+ except requests.exceptions.ConnectionError:
+ raise SdnConnectorError("Request Timeout", http_code=408)
diff --git a/src/tests/osm_end2end/tests/__init__.py b/src/tests/osm_end2end/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ccc21c7db78aac26daa1f8c5ff8e1ffd3f35460
--- /dev/null
+++ b/src/tests/osm_end2end/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/tests/osm_end2end/tests/acknowledgements.txt b/src/tests/osm_end2end/tests/acknowledgements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b7ce926dd006d9bc8afaffbed212d90fb05adbef
--- /dev/null
+++ b/src/tests/osm_end2end/tests/acknowledgements.txt
@@ -0,0 +1,3 @@
+MockOSM is based on source code taken from:
+https://osm.etsi.org/gitlab/osm/ro/-/blob/master/RO-plugin/osm_ro_plugin/sdnconn.py
+https://osm.etsi.org/gitlab/osm/ro/-/blob/master/RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn/wimconn_ietfl2vpn.py
diff --git a/src/tests/osm_end2end/tests/sdnconn.py b/src/tests/osm_end2end/tests/sdnconn.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1849c9ef3e1a1260ff42bbadabc99f91a6435d7
--- /dev/null
+++ b/src/tests/osm_end2end/tests/sdnconn.py
@@ -0,0 +1,242 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with:
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+"""The SDN connector is responsible for establishing both wide area network connectivity (WIM)
+and intranet SDN connectivity.
+
+It receives information from ports to be connected.
+"""
+
+import logging
+from http import HTTPStatus
+
+
+class SdnConnectorError(Exception):
+ """Base Exception for all connector related errors
+ provide the parameter 'http_code' (int) with the error code:
+ Bad_Request = 400
+ Unauthorized = 401 (e.g. credentials are not valid)
+ Not_Found = 404 (e.g. try to edit or delete a non-existent connectivity service)
+ Forbidden = 403
+ Method_Not_Allowed = 405
+ Not_Acceptable = 406
+ Request_Timeout = 408 (e.g. timeout reaching server, or cannot reach the server)
+ Conflict = 409
+ Service_Unavailable = 503
+ Internal_Server_Error = 500
+ """
+
+ def __init__(self, message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value):
+ Exception.__init__(self, message)
+ self.http_code = http_code
+
+
+class SdnConnectorBase(object):
+ """Abstract base class for all the SDN connectors
+
+ Arguments:
+ wim (dict): WIM record, as stored in the database
+ wim_account (dict): WIM account record, as stored in the database
+ config
+ The arguments of the constructor are converted to object attributes.
+ An extra property, ``service_endpoint_mapping`` is created from ``config``.
+ """
+
+ def __init__(self, wim, wim_account, config=None, logger=None):
+ """
+ :param wim: (dict). Contains among others 'wim_url'
+ :param wim_account: (dict). Contains among others 'uuid' (internal id), 'name',
+ 'sdn' (True if it is intended for SDN-assist or False if intended for WIM), 'user', 'password'.
+ :param config: (dict or None): Particular information of plugin. These keys if present have a common meaning:
+ 'mapping_not_needed': (bool) False by default or if missing, indicates that mapping is not needed.
+ 'service_endpoint_mapping': (list) provides the internal endpoint mapping. The meaning is:
+ KEY meaning for WIM meaning for SDN assist
+ -------- -------- --------
+ device_id pop_switch_dpid compute_id
+ device_interface_id pop_switch_port compute_pci_address
+ service_endpoint_id wan_service_endpoint_id SDN_service_endpoint_id
+ service_mapping_info wan_service_mapping_info SDN_service_mapping_info
+ contains extra information if needed. Text in Yaml format
+ switch_dpid wan_switch_dpid SDN_switch_dpid
+ switch_port wan_switch_port SDN_switch_port
+ datacenter_id vim_account vim_account
+ id: (internal, do not use)
+ wim_id: (internal, do not use)
+ :param logger (logging.Logger): optional logger object. If none is passed 'openmano.sdn.sdnconn' is used.
+ """
+ self.logger = logger or logging.getLogger("ro.sdn")
+ self.wim = wim
+ self.wim_account = wim_account
+ self.config = config or {}
+ self.service_endpoint_mapping = self.config.get("service_endpoint_mapping", [])
+
+ def check_credentials(self):
+ """Check if the connector itself can access the SDN/WIM with the provided url (wim.wim_url),
+ user (wim_account.user), and password (wim_account.password)
+
+ Raises:
+ SdnConnectorError: Issues regarding authorization, access to
+ external URLs, etc are detected.
+ """
+ raise NotImplementedError
+
+ def get_connectivity_service_status(self, service_uuid, conn_info=None):
+ """Monitor the status of the connectivity service established
+
+ Arguments:
+ service_uuid (str): UUID of the connectivity service
+ conn_info (dict or None): Information returned by the connector
+ during the service creation/edition and subsequently stored in
+ the database.
+
+ Returns:
+ dict: JSON/YAML-serializable dict that contains a mandatory key
+ ``sdn_status`` associated with one of the following values::
+
+ {'sdn_status': 'ACTIVE'}
+ # The service is up and running.
+
+ {'sdn_status': 'INACTIVE'}
+ # The service was created, but the connector
+ # cannot determine yet if connectivity exists
+ # (ideally, the caller needs to wait and check again).
+
+ {'sdn_status': 'DOWN'}
+ # Connection was previously established,
+ # but an error/failure was detected.
+
+ {'sdn_status': 'ERROR'}
+ # An error occurred when trying to create the service/
+ # establish the connectivity.
+
+ {'sdn_status': 'BUILD'}
+ # Still trying to create the service, the caller
+ # needs to wait and check again.
+
+ Additionally ``error_msg``(**str**) and ``sdn_info``(**dict**)
+ keys can be used to provide additional status explanation or
+ new information available for the connectivity service.
+ """
+ raise NotImplementedError
+
+ def create_connectivity_service(self, service_type, connection_points, **kwargs):
+ """
+ Establish SDN/WAN connectivity between the endpoints
+ :param service_type: (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), ``L3``.
+ :param connection_points: (list): each point corresponds to
+ an entry point to be connected. For WIM: from the DC to the transport network.
+ For SDN: Compute/PCI to the transport network. One
+ connection point serves to identify the specific access and
+ some other service parameters, such as encapsulation type.
+ Each item of the list is a dict with:
+ "service_endpoint_id": (str)(uuid) Same meaning that for 'service_endpoint_mapping' (see __init__)
+ In case the config attribute mapping_not_needed is True, this value is not relevant. In this case
+ it will contain the string "device_id:device_interface_id"
+ "service_endpoint_encapsulation_type": None, "dot1q", ...
+ "service_endpoint_encapsulation_info": (dict) with:
+ "vlan": ..., (int, present if encapsulation is dot1q)
+ "vni": ... (int, present if encapsulation is vxlan),
+ "peers": [(ipv4_1), (ipv4_2)] (present if encapsulation is vxlan)
+ "mac": ...
+ "device_id": ..., same meaning that for 'service_endpoint_mapping' (see __init__)
+ "device_interface_id": same meaning that for 'service_endpoint_mapping' (see __init__)
+ "switch_dpid": ..., present if mapping has been found for this device_id,device_interface_id
+ "swith_port": ... present if mapping has been found for this device_id,device_interface_id
+ "service_mapping_info": present if mapping has been found for this device_id,device_interface_id
+ :param kwargs: For future versions:
+ bandwidth (int): value in kilobytes
+ latency (int): value in milliseconds
+ Other QoS might be passed as keyword arguments.
+ :return: tuple: ``(service_id, conn_info)`` containing:
+ - *service_uuid* (str): UUID of the established connectivity service
+ - *conn_info* (dict or None): Information to be stored at the database (or ``None``).
+ This information will be provided to the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
+ **MUST** be JSON/YAML-serializable (plain data structures).
+ :raises: SdnConnectorError: In case of error. Nothing should be created in this case.
+ Provide the parameter http_code
+ """
+ raise NotImplementedError
+
+ def delete_connectivity_service(self, service_uuid, conn_info=None):
+ """
+ Disconnect multi-site endpoints previously connected
+
+ :param service_uuid: The one returned by create_connectivity_service
+ :param conn_info: The one returned by last call to 'create_connectivity_service' or 'edit_connectivity_service'
+ if they do not return None
+ :return: None
+ :raises: SdnConnectorError: In case of error. The parameter http_code must be filled
+ """
+ raise NotImplementedError
+
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
+ """Change an existing connectivity service.
+
+ This method's arguments and return value follow the same convention as
+ :meth:`~.create_connectivity_service`.
+
+ :param service_uuid: UUID of the connectivity service.
+ :param conn_info: (dict or None): Information previously returned by last call to create_connectivity_service
+ or edit_connectivity_service
+ :param connection_points: (list): If provided, the old list of connection points will be replaced.
+ :param kwargs: Same meaning that create_connectivity_service
+ :return: dict or None: Information to be updated and stored at the database.
+ When ``None`` is returned, no information should be changed.
+ When an empty dict is returned, the database record will be deleted.
+ **MUST** be JSON/YAML-serializable (plain data structures).
+ Raises:
+ SdnConnectorError: In case of error.
+ """
+
+ def clear_all_connectivity_services(self):
+ """Delete all WAN Links in a WIM.
+
+ This method is intended for debugging only, and should delete all the
+ connections controlled by the WIM/SDN, not only the connections that
+ a specific RO is aware of.
+
+ Raises:
+ SdnConnectorError: In case of error.
+ """
+ raise NotImplementedError
+
+ def get_all_active_connectivity_services(self):
+ """Provide information about all active connections provisioned by a
+ WIM.
+
+ Raises:
+ SdnConnectorError: In case of error.
+ """
+ raise NotImplementedError
diff --git a/src/tests/osm_end2end/tests/test_cleanup.py b/src/tests/osm_end2end/tests/test_cleanup.py
new file mode 100644
index 0000000000000000000000000000000000000000..20afb5fe02d63f64de45fe87830e8996302c4395
--- /dev/null
+++ b/src/tests/osm_end2end/tests/test_cleanup.py
@@ -0,0 +1,44 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId
+from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from .Fixtures import context_client, device_client # pylint: disable=unused-import
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json')
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+def test_scenario_cleanup(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name
+ device_client : DeviceClient, # pylint: disable=redefined-outer-name
+) -> None:
+ # Verify the scenario has no services/slices
+ response = context_client.GetContext(ADMIN_CONTEXT_ID)
+ assert len(response.service_ids) == 0
+ assert len(response.slice_ids) == 0
+
+ # Load descriptors and validate the base scenario
+ descriptor_loader = DescriptorLoader(
+ descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+ descriptor_loader.validate()
+ descriptor_loader.unload()
+ validate_empty_scenario(context_client)
diff --git a/src/tests/osm_end2end/tests/test_onboarding.py b/src/tests/osm_end2end/tests/test_onboarding.py
new file mode 100644
index 0000000000000000000000000000000000000000..763d7da171c99b781a6d25fc01e3c10c340bfb43
--- /dev/null
+++ b/src/tests/osm_end2end/tests/test_onboarding.py
@@ -0,0 +1,67 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os, time
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from .Fixtures import context_client, device_client # pylint: disable=unused-import
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json')
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+def test_scenario_onboarding(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name
+ device_client : DeviceClient, # pylint: disable=redefined-outer-name
+) -> None:
+ validate_empty_scenario(context_client)
+
+ descriptor_loader = DescriptorLoader(
+ descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+ results = descriptor_loader.process()
+ check_descriptor_load_results(results, descriptor_loader)
+ descriptor_loader.validate()
+
+ # Verify the scenario has no services/slices
+ response = context_client.GetContext(ADMIN_CONTEXT_ID)
+ assert len(response.service_ids) == 0
+ assert len(response.slice_ids) == 0
+
+def test_scenario_devices_enabled(
+ context_client : ContextClient, # pylint: disable=redefined-outer-name
+) -> None:
+ """
+ This test validates that the devices are enabled.
+ """
+ DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
+
+ num_devices = -1
+ num_devices_enabled, num_retry = 0, 0
+ while (num_devices != num_devices_enabled) and (num_retry < 10):
+ time.sleep(1.0)
+ response = context_client.ListDevices(Empty())
+ num_devices = len(response.devices)
+ num_devices_enabled = 0
+ for device in response.devices:
+ if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue
+ num_devices_enabled += 1
+ LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices))
+ num_retry += 1
+ assert num_devices_enabled == num_devices
diff --git a/src/tests/osm_end2end/tests/test_osm_service_create.py b/src/tests/osm_end2end/tests/test_osm_service_create.py
new file mode 100644
index 0000000000000000000000000000000000000000..73b7a911d510d2419bb157c9cb95cc837e14e645
--- /dev/null
+++ b/src/tests/osm_end2end/tests/test_osm_service_create.py
@@ -0,0 +1,78 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from .Fixtures import ( # pylint: disable=unused-import
+ # be careful, order of symbols is important here!
+ osm_wim, context_client
+)
+from .MockOSM import MockOSM
+from .OSM_Constants import SERVICE_CONNECTION_POINTS, SERVICE_TYPE, SERVICE_VARIANT
+
+
+logging.getLogger('ro.sdn.ietfl2vpn').setLevel(logging.DEBUG)
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+
+# pylint: disable=redefined-outer-name, unused-argument
+def test_osm_service_create(
+ osm_wim : MockOSM, context_client : ContextClient
+):
+ LOGGER.warning('Creating OSM service variant=%s', SERVICE_VARIANT)
+ osm_wim.create_connectivity_service(SERVICE_TYPE, SERVICE_CONNECTION_POINTS)
+ service_uuid = list(osm_wim.conn_info.keys())[0] # this test adds a single service
+
+ result = osm_wim.get_connectivity_service_status(service_uuid)
+ assert 'sdn_status' in result
+ assert result['sdn_status'] == 'ACTIVE'
+
+ # Verify the scenario has 1 service and 0 slices
+ response = context_client.GetContext(ADMIN_CONTEXT_ID)
+ assert len(response.service_ids) == 1
+ assert len(response.slice_ids) == 0
+
+ # Check there are no slices
+ response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+ LOGGER.warning('Slices[{:d}] = {:s}'.format(
+ len(response.slices), grpc_message_to_json_string(response)
+ ))
+ assert len(response.slices) == 0
+
+ # Check there is 1 service
+ response = context_client.ListServices(ADMIN_CONTEXT_ID)
+ LOGGER.warning('Services[{:d}] = {:s}'.format(
+ len(response.services), grpc_message_to_json_string(response)
+ ))
+ assert len(response.services) == 1
+
+ for service in response.services:
+ service_id = service.service_id
+ assert service_id.service_uuid.uuid == service_uuid
+ assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+ assert service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+
+ response = context_client.ListConnections(service_id)
+ LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+ grpc_message_to_json_string(service_id), len(response.connections),
+ grpc_message_to_json_string(response)
+ ))
+ assert len(response.connections) == 1
diff --git a/src/tests/osm_end2end/tests/test_osm_service_remove.py b/src/tests/osm_end2end/tests/test_osm_service_remove.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc6aa7742377cf049170a136c6ca411ae92093d5
--- /dev/null
+++ b/src/tests/osm_end2end/tests/test_osm_service_remove.py
@@ -0,0 +1,86 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Set
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from .Fixtures import ( # pylint: disable=unused-import
+ # be careful, order of symbols is important here!
+ osm_wim, context_client
+)
+from .MockOSM import MockOSM
+from .OSM_Constants import SERVICE_VARIANT
+
+
+logging.getLogger('ro.sdn.ietfl2vpn').setLevel(logging.DEBUG)
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+
+# pylint: disable=redefined-outer-name, unused-argument
+def test_osm_service_remove(
+ osm_wim : MockOSM, context_client : ContextClient
+):
+ LOGGER.warning('Removing OSM service variant=%s', SERVICE_VARIANT)
+ # Verify the scenario has 1 service and 0 slices
+ response = context_client.GetContext(ADMIN_CONTEXT_ID)
+ assert len(response.service_ids) == 1
+ assert len(response.slice_ids) == 0
+
+ # Check there are no slices
+ response = context_client.ListSlices(ADMIN_CONTEXT_ID)
+ LOGGER.warning('Slices[{:d}] = {:s}'.format(
+ len(response.slices), grpc_message_to_json_string(response)
+ ))
+ assert len(response.slices) == 0
+
+ # Check there is 1 service
+ response = context_client.ListServices(ADMIN_CONTEXT_ID)
+ LOGGER.warning('Services[{:d}] = {:s}'.format(
+ len(response.services), grpc_message_to_json_string(response)
+ ))
+ assert len(response.services) == 1
+
+ service_uuids : Set[str] = set()
+ for service in response.services:
+ service_id = service.service_id
+ assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+ assert service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+
+ response = context_client.ListConnections(service_id)
+ LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+ grpc_message_to_json_string(service_id), len(response.connections),
+ grpc_message_to_json_string(response)
+ ))
+ assert len(response.connections) == 1
+
+ service_uuids.add(service_id.service_uuid.uuid)
+
+ # Identify service to delete
+ assert len(service_uuids) == 1
+ service_uuid = service_uuids.pop()
+
+ osm_wim.conn_info[service_uuid] = dict() # delete just needs the placeholder to be populated
+ osm_wim.delete_connectivity_service(service_uuid)
+
+ # Verify the scenario has no services/slices
+ response = context_client.GetContext(ADMIN_CONTEXT_ID)
+ assert len(response.service_ids) == 0
+ assert len(response.slice_ids) == 0
diff --git a/src/tests/ryu-openflow/.gitlab-ci.yml b/src/tests/ryu-openflow/.gitlab-ci.yml
index 0002ff6f57fe66bf2d4eef470e3b65347454a206..6caebb2047565bcc75cff265c0ac677112b8eb75 100644
--- a/src/tests/ryu-openflow/.gitlab-ci.yml
+++ b/src/tests/ryu-openflow/.gitlab-ci.yml
@@ -191,6 +191,28 @@ end2end_test ryu-openflow:
- echo "Mininet is ready!"
# - docker logs mininet
+ # Wait for initialization of OVS switches
+ - >
+ while true; do
+ echo "Attempt: $LOOP_COUNTER"
+ echo "Waiting for OVS bridges s1-s5..."
+ sleep 1;
+ OVS_OUTPUT="$(docker exec mininet bash -c "ovs-vsctl show")"
+ if echo "$OVS_OUTPUT" | grep -q "Bridge s1" &&
+ echo "$OVS_OUTPUT" | grep -q "Bridge s2" &&
+ echo "$OVS_OUTPUT" | grep -q "Bridge s3" &&
+ echo "$OVS_OUTPUT" | grep -q "Bridge s4" &&
+ echo "$OVS_OUTPUT" | grep -q "Bridge s5"; then
+ echo "All OVS bridges are ready!"
+ break
+ fi
+ LOOP_COUNTER=$((LOOP_COUNTER + 1))
+ if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then
+ echo "Max attempts reached, exiting the loop."
+ break
+ fi
+ done
+
# Dump configuration of the switches (OpenFlow rules configured) (after starting Mininet)
- docker exec mininet bash -c "ovs-vsctl show"
- docker exec mininet bash -c "ovs-ofctl dump-flows s1"
diff --git a/src/tests/sns4sns26/clab/r1-startup.cfg b/src/tests/sns4sns26/clab/r1-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..fdac2ee972efd7c37d8deb8fb21101e315d9bb04
--- /dev/null
+++ b/src/tests/sns4sns26/clab/r1-startup.cfg
@@ -0,0 +1,51 @@
+! Command: show running-config
+! device: r1 (cEOSLab, EOS-4.34.4M-45127473.4344M (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$v.q.IH8.dY2VUI8P$S6GkfbDxjEm8kHeU/5oEWrlcTFS2vBr4mPK4s8d0w2gi6wWR1jcajM8fqg93405IfDm6yLqSh4IC1AKbwIdKr/
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r1
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet2
+!
+interface Ethernet10
+ mtu 1400
+ no switchport
+!
+interface Management0
+ ip address 172.20.20.101/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/sns4sns26/clab/r2-startup.cfg b/src/tests/sns4sns26/clab/r2-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..22ad9852b5c9c80dffe06dab96a752897354b872
--- /dev/null
+++ b/src/tests/sns4sns26/clab/r2-startup.cfg
@@ -0,0 +1,49 @@
+! Command: show running-config
+! device: r2 (cEOSLab, EOS-4.34.4M-45127473.4344M (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$Fu.Mcm.e.zgLUCv9$bEp3TJMx3.6GqNcx5MpLEpWyNgVrvjW6zDXhbu7.iIeqIuH7z0rN7zGnSWU18lIGI7B9BK3ShmPIYsF7hPLup/
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r2
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet1
+!
+interface Ethernet3
+!
+interface Management0
+ ip address 172.20.20.102/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/sns4sns26/clab/r3-startup.cfg b/src/tests/sns4sns26/clab/r3-startup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..d11649d88cf994c7dd74549b15b3e2694aa44bb3
--- /dev/null
+++ b/src/tests/sns4sns26/clab/r3-startup.cfg
@@ -0,0 +1,51 @@
+! Command: show running-config
+! device: r3 (cEOSLab, EOS-4.34.4M-45127473.4344M (engineering build))
+!
+no aaa root
+!
+username admin privilege 15 role network-admin secret sha512 $6$Fu.Mcm.e.zgLUCv9$bEp3TJMx3.6GqNcx5MpLEpWyNgVrvjW6zDXhbu7.iIeqIuH7z0rN7zGnSWU18lIGI7B9BK3ShmPIYsF7hPLup/
+!
+management api http-commands
+ no shutdown
+!
+no service interface inactive port-id allocation disabled
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname r3
+!
+spanning-tree mode mstp
+!
+system l1
+ unsupported speed action error
+ unsupported error-correction action error
+!
+management api gnmi
+ transport grpc default
+!
+management api netconf
+ transport ssh default
+!
+interface Ethernet2
+!
+interface Ethernet10
+ mtu 1400
+ no switchport
+!
+interface Management0
+ ip address 172.20.20.103/24
+!
+ip routing
+!
+ip route 0.0.0.0/0 172.20.20.1
+!
+router multicast
+ ipv4
+ software-forwarding kernel
+ !
+ ipv6
+ software-forwarding kernel
+!
+end
diff --git a/src/tests/sns4sns26/clab/sns4sns26.clab.yml b/src/tests/sns4sns26/clab/sns4sns26.clab.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8f8a94aec1e7e3f4e352fe98f03f5dca12e6b848
--- /dev/null
+++ b/src/tests/sns4sns26/clab/sns4sns26.clab.yml
@@ -0,0 +1,57 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ref: https://containerlab.dev/manual/network/#macvlan-links
+# Ref: https://containerlab.dev/manual/network/#host-links
+
+
+# ETSI Joint SDG Lab
+
+name: sns4sns26
+
+mgmt:
+ network: mgmt-net
+ ipv4-subnet: 172.20.20.0/24
+ mtu: 1400
+
+topology:
+ kinds:
+ arista_ceos:
+ kind: arista_ceos
+ image: ceos:4.34.4M
+ linux:
+ kind: linux
+ image: ghcr.io/hellt/network-multitool:latest
+
+ nodes:
+ r1:
+ kind: arista_ceos
+ mgmt-ipv4: 172.20.20.101
+ startup-config: r1-startup.cfg
+
+ r2:
+ kind: arista_ceos
+ mgmt-ipv4: 172.20.20.102
+ startup-config: r2-startup.cfg
+
+ r3:
+ kind: arista_ceos
+ mgmt-ipv4: 172.20.20.103
+ startup-config: r3-startup.cfg
+
+ links:
+ - endpoints: ["r1:eth2", "r2:eth1"]
+ - endpoints: ["r2:eth3", "r3:eth2"]
+ - endpoints: ["r1:eth10", "macvlan:enp0s4"] # connect to site A virtual network
+ - endpoints: ["r3:eth10", "macvlan:enp0s5"] # connect to site B virtual network
diff --git a/src/tests/sns4sns26/data/mock_osm_connection.json b/src/tests/sns4sns26/data/mock_osm_connection.json
new file mode 100644
index 0000000000000000000000000000000000000000..ab8944453be8b42d27e38ad6eceaa72ea2ef1dc6
--- /dev/null
+++ b/src/tests/sns4sns26/data/mock_osm_connection.json
@@ -0,0 +1,6 @@
+{
+ "wim_ip": "10.0.2.10",
+ "wim_port": 80,
+ "wim_user": "admin",
+ "wim_pass": "admin"
+}
diff --git a/src/tests/sns4sns26/data/mock_osm_mapping.json b/src/tests/sns4sns26/data/mock_osm_mapping.json
new file mode 100644
index 0000000000000000000000000000000000000000..b9d04e9492ae597877649d75e9fcefbf65696a47
--- /dev/null
+++ b/src/tests/sns4sns26/data/mock_osm_mapping.json
@@ -0,0 +1,4 @@
+[
+ {"service_endpoint_id": "SiteA", "device-id": "router-1", "service_mapping_info": {"bearer": {"bearer-reference": "SNS4SNS26:SiteA"}, "site-id": "A"}},
+ {"service_endpoint_id": "SiteB", "device-id": "router-3", "service_mapping_info": {"bearer": {"bearer-reference": "SNS4SNS26:SiteB"}, "site-id": "B"}}
+]
diff --git a/src/tests/sns4sns26/data/tfs-topology.json b/src/tests/sns4sns26/data/tfs-topology.json
new file mode 100644
index 0000000000000000000000000000000000000000..3c73515d807a44b2291ef8750671a3322b52f711
--- /dev/null
+++ b/src/tests/sns4sns26/data/tfs-topology.json
@@ -0,0 +1,126 @@
+{
+ "contexts": [
+ {"context_id": {"context_uuid": {"uuid": "admin"}}}
+ ],
+ "topologies": [
+ {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}
+ ],
+ "devices": [
+ {
+ "device_id": {"device_uuid": {"uuid": "cluster-a"}}, "device_type": "emu-datacenter",
+ "device_drivers": ["DEVICEDRIVER_UNDEFINED"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"}
+ ]}}}
+ ]}
+ },
+ {
+ "device_id": {"device_uuid": {"uuid": "cluster-b"}}, "device_type": "emu-datacenter",
+ "device_drivers": ["DEVICEDRIVER_UNDEFINED"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+ {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"}
+ ]}}}
+ ]}
+ },
+ {
+ "device_id": {"device_uuid": {"uuid": "router-1"}}, "device_type": "packet-router",
+ "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.101"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+ "username": "admin", "password": "admin", "use_tls": false
+ }}}
+ ]}
+ },
+ {
+ "device_id": {"device_uuid": {"uuid": "router-2"}}, "device_type": "packet-router",
+ "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.102"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+ "username": "admin", "password": "admin", "use_tls": false
+ }}}
+ ]}
+ },
+ {
+ "device_id": {"device_uuid": {"uuid": "router-3"}}, "device_type": "packet-router",
+ "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"],
+ "device_config": {"config_rules": [
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.103"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}},
+ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {
+ "username": "admin", "password": "admin", "use_tls": false
+ }}}
+ ]}
+ }
+ ],
+ "links": [
+ {
+ "link_id": {"link_uuid": {"uuid": "router-1/Ethernet2==router-2/Ethernet1"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "router-1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}},
+ {"device_id": {"device_uuid": {"uuid": "router-2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}}
+ ]
+ },
+ {
+ "link_id": {"link_uuid": {"uuid": "router-2/Ethernet1==router-1/Ethernet2"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "router-2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}},
+ {"device_id": {"device_uuid": {"uuid": "router-1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}
+ ]
+ },
+
+ {
+ "link_id": {"link_uuid": {"uuid": "router-2/Ethernet3==router-3/Ethernet2"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "router-2"}}, "endpoint_uuid": {"uuid": "Ethernet3"}},
+ {"device_id": {"device_uuid": {"uuid": "router-3"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}
+ ]
+ },
+ {
+ "link_id": {"link_uuid": {"uuid": "router-3/Ethernet2==router-2/Ethernet3"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "router-3"}}, "endpoint_uuid": {"uuid": "Ethernet2"}},
+ {"device_id": {"device_uuid": {"uuid": "router-2"}}, "endpoint_uuid": {"uuid": "Ethernet3"}}
+ ]
+ },
+
+ {
+ "link_id": {"link_uuid": {"uuid": "router-1/Ethernet10==cluster-a/eth1"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "router-1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}},
+ {"device_id": {"device_uuid": {"uuid": "cluster-a"}}, "endpoint_uuid": {"uuid": "eth1"}}
+ ]
+ },
+ {
+ "link_id": {"link_uuid": {"uuid": "cluster-a/eth1==router-1/Ethernet10"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "cluster-a"}}, "endpoint_uuid": {"uuid": "eth1"}},
+ {"device_id": {"device_uuid": {"uuid": "router-1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}
+ ]
+ },
+
+ {
+ "link_id": {"link_uuid": {"uuid": "router-3/Ethernet10==cluster-b/eth1"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "router-3"}}, "endpoint_uuid": {"uuid": "Ethernet10"}},
+ {"device_id": {"device_uuid": {"uuid": "cluster-b"}}, "endpoint_uuid": {"uuid": "eth1"}}
+ ]
+ },
+ {
+ "link_id": {"link_uuid": {"uuid": "cluster-b/eth1==router-3/Ethernet10"}},
+ "link_endpoint_ids": [
+ {"device_id": {"device_uuid": {"uuid": "cluster-b"}}, "endpoint_uuid": {"uuid": "eth1"}},
+ {"device_id": {"device_uuid": {"uuid": "router-3"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}
+ ]
+ }
+ ]
+}
diff --git a/src/tests/sns4sns26/scripts/clab-cli-r1.sh b/src/tests/sns4sns26/scripts/clab-cli-r1.sh
new file mode 100644
index 0000000000000000000000000000000000000000..df9c15c61b7e779af0269f9b1fed974cca777516
--- /dev/null
+++ b/src/tests/sns4sns26/scripts/clab-cli-r1.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-sns4sns26-r1 Cli
diff --git a/src/tests/sns4sns26/scripts/clab-cli-r2.sh b/src/tests/sns4sns26/scripts/clab-cli-r2.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fcb577295b1bdc342f0cd638a9edfbc1e15d1a6a
--- /dev/null
+++ b/src/tests/sns4sns26/scripts/clab-cli-r2.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-sns4sns26-r2 Cli
diff --git a/src/tests/sns4sns26/scripts/clab-cli-r3.sh b/src/tests/sns4sns26/scripts/clab-cli-r3.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fcb577295b1bdc342f0cd638a9edfbc1e15d1a6a
--- /dev/null
+++ b/src/tests/sns4sns26/scripts/clab-cli-r3.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker exec -it clab-sns4sns26-r3 Cli
diff --git a/src/tests/sns4sns26/scripts/clab-deploy.sh b/src/tests/sns4sns26/scripts/clab-deploy.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8554532da9d95e6e1072b6c68fd1fb5c52af6534
--- /dev/null
+++ b/src/tests/sns4sns26/scripts/clab-deploy.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+sudo ip link set ens4 down
+sudo ip link set ens5 down
+sudo ip link set ens4 up
+sudo ip link set ens5 up
+
+cd ~/tfs-ctrl/src/tests/sns4sns26
+sudo containerlab deploy --topo clab/sns4sns26.clab.yml
diff --git a/src/tests/sns4sns26/scripts/clab-destroy.sh b/src/tests/sns4sns26/scripts/clab-destroy.sh
new file mode 100644
index 0000000000000000000000000000000000000000..af92ca09239498a70d61a08cc877258e4e9c1c7c
--- /dev/null
+++ b/src/tests/sns4sns26/scripts/clab-destroy.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/src/tests/sns4sns26
+sudo containerlab destroy --topo clab/sns4sns26.clab.yml
+sudo rm -rf clab/clab-sns4sns26/ clab/.sns4sns26.clab.yml.bak
diff --git a/src/tests/sns4sns26/scripts/clab-inspect.sh b/src/tests/sns4sns26/scripts/clab-inspect.sh
new file mode 100644
index 0000000000000000000000000000000000000000..941f2606c4225374230f5735882b6663e24b218d
--- /dev/null
+++ b/src/tests/sns4sns26/scripts/clab-inspect.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cd ~/tfs-ctrl/src/tests/sns4sns26
+sudo containerlab inspect --topo clab/sns4sns26.clab.yml
diff --git a/src/tests/tools/mock_osm/README.md b/src/tests/tools/mock_osm/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a197303cb0e26399a3654fb40613c69869ef9e85
--- /dev/null
+++ b/src/tests/tools/mock_osm/README.md
@@ -0,0 +1,41 @@
+# Mock OSM (tests.tools.mock_osm)
+
+This package provides a small interactive shell for testing WIM connectivity
+through the MockOSM connector.
+
+## Run
+
+```bash
+python -m tests.tools.mock_osm example_connection.json example_mapping.json
+```
+
+## Commands
+
+- `create <service-type> <endpoint> <endpoint> ... [vlan <vlan-id>]`
+ - `ELINE` requires exactly 2 endpoints
+ - `ELAN` requires at least 2 endpoints
+- `status`
+- `delete`
+- `exit`
+
+Endpoints are provided as a list of strings (service endpoint IDs), for example:
+
+```text
+(mock-osm) create ELINE ep-R1-1/2 ep-R4-1/3
+```
+
+Optional VLAN tagging for all endpoints:
+
+```text
+(mock-osm) create ELINE ep-R1-1/2 ep-R4-1/3 vlan 1234
+```
+
+## Example configs
+
+See:
+- `src/tests/tools/mock_osm/example_connection.json`
+- `src/tests/tools/mock_osm/example_mapping.json`
+
+The mapping file is a JSON list where each entry includes the
+`service_endpoint_id`, `device-id`, and `service_mapping_info` with `bearer`
+and `site-id`.
diff --git a/src/tests/tools/mock_osm/__main__.py b/src/tests/tools/mock_osm/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f94a9a633386cdbb16a7eedd24a142b813be564
--- /dev/null
+++ b/src/tests/tools/mock_osm/__main__.py
@@ -0,0 +1,252 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import cmd
+import json
+import logging
+import shlex
+import sys
+from typing import Any, Dict, List, Tuple
+
+from .MockOSM import MockOSM
+
+logging.basicConfig(level=logging.DEBUG)
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+SUPPORTED_SERVICE_TYPES = {"ELINE", "ELAN"}
+
+
+def _load_json_file(path: str) -> Any:
+ with open(path, "r", encoding="utf-8") as handle:
+ return json.load(handle)
+
+
+def _first_present(data: Dict[str, Any], keys: Tuple[str, ...]) -> Any:
+ for key in keys:
+ if key in data:
+ return data[key]
+ return None
+
+
+def _parse_connection_config(config: Dict[str, Any]) -> Tuple[str, str, str]:
+ if not isinstance(config, dict):
+ raise ValueError("Connection config must be a JSON object.")
+
+ wim_ip = _first_present(config, ("wim_ip", "ip", "host", "address"))
+ wim_port = _first_present(config, ("wim_port", "port"))
+ wim_user = _first_present(config, ("wim_user", "user", "username"))
+ wim_pass = _first_present(config, ("wim_pass", "wim_password", "pass", "password"))
+
+ missing = []
+ if not wim_ip:
+ missing.append("wim_ip")
+ if wim_port is None:
+ missing.append("wim_port")
+ if not wim_user:
+ missing.append("wim_user")
+ if wim_pass is None:
+ missing.append("wim_pass")
+
+ if missing:
+ raise ValueError(
+ "Connection config missing required fields: {}".format(", ".join(missing))
+ )
+
+ try:
+ wim_port = int(wim_port)
+ except (TypeError, ValueError) as exc:
+ raise ValueError("wim_port must be an integer") from exc
+
+ wim_url = "http://{:s}:{:d}".format(str(wim_ip), wim_port)
+ return wim_url, str(wim_user), str(wim_pass)
+
+
+def _parse_mapping_config(mapping: Any) -> Dict[str, Dict[str, Any]]:
+ if not isinstance(mapping, list):
+ raise ValueError("Mapping config must be a JSON list.")
+
+ mapping_by_id = {}
+ for index, entry in enumerate(mapping):
+ if not isinstance(entry, dict):
+ raise ValueError("Mapping entry {:d} must be a JSON object".format(index))
+ service_endpoint_id = entry.get("service_endpoint_id")
+ if not service_endpoint_id or not isinstance(service_endpoint_id, str):
+ raise ValueError(
+ "Mapping entry {:d} missing service_endpoint_id".format(index)
+ )
+ if service_endpoint_id in mapping_by_id:
+ LOGGER.warning(
+ "Duplicate service_endpoint_id in mapping: %s", service_endpoint_id
+ )
+ mapping_by_id[service_endpoint_id] = entry
+
+ return mapping_by_id
+
+
+class MockOSMShell(cmd.Cmd):
+ intro = "Welcome to the MockOSM shell.\nType help or ? to list commands.\n"
+ prompt = "(mock-osm) "
+
+ def __init__(self, mock_osm: MockOSM, mapping_by_id: Dict[str, Dict[str, Any]]):
+ super().__init__()
+ self.mock_osm = mock_osm
+ self.mapping_by_id = mapping_by_id
+
+ def do_create(self, arg: str) -> None:
+        "Create a connectivity service: create <service-type> <endpoint> ... [vlan <vlan-id>]"
+ try:
+ service_type, endpoints = self._parse_create_args(arg)
+ service_uuid = self.mock_osm.create_connectivity_service(
+ service_type, endpoints
+ )
+ print("Service {:s} created".format(service_uuid))
+ except Exception as exc:
+ print("Error: {:s}".format(str(exc)))
+
+ def do_status(self, arg: str) -> None:
+ "Retrieve status of services"
+ service_uuids = list(self.mock_osm.conn_info.keys())
+ for service_uuid in service_uuids:
+ status = self.mock_osm.get_connectivity_service_status(service_uuid)
+ print("Status of Service {:s} is {:s}".format(service_uuid, str(status)))
+
+ def do_delete(self, arg: str) -> None:
+ "Delete all services"
+ service_uuids = list(self.mock_osm.conn_info.keys())
+ for service_uuid in service_uuids:
+ self.mock_osm.delete_connectivity_service(service_uuid)
+ print("Service {:s} deleted".format(service_uuid))
+
+ def do_exit(self, arg: str) -> bool:
+ "Exit MockOSM"
+ print("Bye!")
+ return True
+
+ def _parse_create_args(self, arg: str) -> Tuple[str, List[Dict[str, Any]]]:
+ tokens = shlex.split(arg)
+ if len(tokens) < 2:
+ raise ValueError(
+                "Usage: create <service-type> <endpoint> <endpoint> ... [vlan <vlan-id>]"
+ )
+
+ service_type = tokens[0]
+ endpoints_tokens = tokens[1:]
+ vlan_id = None
+ if "vlan" in endpoints_tokens:
+ vlan_index = endpoints_tokens.index("vlan")
+ if vlan_index == len(endpoints_tokens) - 1:
+                raise ValueError("vlan requires <vlan-id>")
+ if vlan_index + 2 != len(endpoints_tokens):
+ raise ValueError("vlan must be the last argument")
+ vlan_token = endpoints_tokens[vlan_index + 1]
+ try:
+ vlan_id = int(vlan_token)
+ except (TypeError, ValueError) as exc:
+ raise ValueError("vlan-id must be an integer") from exc
+ endpoints_tokens = endpoints_tokens[:vlan_index]
+
+ endpoints = self._load_endpoints(endpoints_tokens)
+ self._validate_service_request(service_type, endpoints)
+ connection_points = [
+ {
+ "service_endpoint_id": endpoint,
+ "service_endpoint_encapsulation_type": (
+ "dot1q" if vlan_id is not None else "none"
+ ),
+ **(
+ {"service_endpoint_encapsulation_info": {"vlan": vlan_id}}
+ if vlan_id is not None
+ else {}
+ ),
+ }
+ for endpoint in endpoints
+ ]
+ return service_type, connection_points
+
+ def _load_endpoints(self, tokens: List[str]) -> List[str]:
+ endpoints = []
+ for token in tokens:
+ if "," in token:
+ endpoints.extend([item for item in token.split(",") if item])
+ else:
+ endpoints.append(token)
+ return endpoints
+
+ def _validate_service_request(
+ self, service_type: str, endpoints: List[str]
+ ) -> None:
+ if not isinstance(service_type, str) or not service_type:
+ raise ValueError("Service type must be a non-empty string.")
+
+ if service_type not in SUPPORTED_SERVICE_TYPES:
+ raise ValueError(
+ "Unsupported service type. Supported: {}".format(
+ ", ".join(sorted(SUPPORTED_SERVICE_TYPES))
+ )
+ )
+
+ if not endpoints:
+ raise ValueError("Endpoints list must not be empty.")
+
+ if service_type == "ELINE" and len(endpoints) != 2:
+ raise ValueError("ELINE requires exactly 2 endpoints.")
+
+ if service_type == "ELAN" and len(endpoints) < 2:
+ raise ValueError("ELAN requires at least 2 endpoints.")
+
+ for index, endpoint in enumerate(endpoints):
+ if not isinstance(endpoint, str) or not endpoint:
+ raise ValueError(
+ "Endpoint {:d} must be a non-empty string".format(index)
+ )
+ if endpoint not in self.mapping_by_id:
+ raise ValueError(
+ "Endpoint {:s} not found in WIM port mapping".format(endpoint)
+ )
+
+
+def _parse_args(argv: List[str]) -> argparse.Namespace:
+ parser = argparse.ArgumentParser(description="MockOSM shell")
+ parser.add_argument(
+ "connection_config",
+ help="JSON with WIM IP, port, user, and password",
+ )
+ parser.add_argument(
+ "mapping_config",
+ help="JSON with pre-generated WIM port mapping",
+ )
+ return parser.parse_args(argv)
+
+
+def main(argv: List[str]) -> int:
+ args = _parse_args(argv)
+
+ try:
+ connection_config = _load_json_file(args.connection_config)
+ mapping_config = _load_json_file(args.mapping_config)
+ wim_url, wim_user, wim_pass = _parse_connection_config(connection_config)
+ mapping_by_id = _parse_mapping_config(mapping_config)
+ except Exception as exc:
+ print("Configuration error: {:s}".format(str(exc)), file=sys.stderr)
+ return 2
+
+ mock_osm = MockOSM(wim_url, mapping_config, wim_user, wim_pass)
+ MockOSMShell(mock_osm, mapping_by_id).cmdloop()
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
diff --git a/src/tests/tools/mock_osm/example_connection.json b/src/tests/tools/mock_osm/example_connection.json
new file mode 100644
index 0000000000000000000000000000000000000000..ab8944453be8b42d27e38ad6eceaa72ea2ef1dc6
--- /dev/null
+++ b/src/tests/tools/mock_osm/example_connection.json
@@ -0,0 +1,6 @@
+{
+ "wim_ip": "10.0.2.10",
+ "wim_port": 80,
+ "wim_user": "admin",
+ "wim_pass": "admin"
+}
diff --git a/src/tests/tools/mock_osm/example_mapping.json b/src/tests/tools/mock_osm/example_mapping.json
new file mode 100644
index 0000000000000000000000000000000000000000..10897cac12907e51b1a4d06775414445ac09768d
--- /dev/null
+++ b/src/tests/tools/mock_osm/example_mapping.json
@@ -0,0 +1,82 @@
+[
+ {
+ "device-id": "R1",
+ "service_endpoint_id": "ep-R1-1/2",
+ "service_mapping_info": {
+ "bearer": {
+ "bearer-reference": "R1:1/2"
+ },
+ "site-id": "1"
+ }
+ },
+ {
+ "device-id": "R1",
+ "service_endpoint_id": "ep-R1-1/3",
+ "service_mapping_info": {
+ "bearer": {
+ "bearer-reference": "R1:1/3"
+ },
+ "site-id": "1"
+ }
+ },
+ {
+ "device-id": "R2",
+ "service_endpoint_id": "ep-R2-1/2",
+ "service_mapping_info": {
+ "bearer": {
+ "bearer-reference": "R2:1/2"
+ },
+ "site-id": "2"
+ }
+ },
+ {
+ "device-id": "R2",
+ "service_endpoint_id": "ep-R2-1/3",
+ "service_mapping_info": {
+ "bearer": {
+ "bearer-reference": "R2:1/3"
+ },
+ "site-id": "2"
+ }
+ },
+ {
+ "device-id": "R3",
+ "service_endpoint_id": "ep-R3-1/2",
+ "service_mapping_info": {
+ "bearer": {
+ "bearer-reference": "R3:1/2"
+ },
+ "site-id": "3"
+ }
+ },
+ {
+ "device-id": "R3",
+ "service_endpoint_id": "ep-R3-1/3",
+ "service_mapping_info": {
+ "bearer": {
+ "bearer-reference": "R3:1/3"
+ },
+ "site-id": "3"
+ }
+ },
+ {
+ "device-id": "R4",
+ "service_endpoint_id": "ep-R4-1/2",
+ "service_mapping_info": {
+ "bearer": {
+ "bearer-reference": "R4:1/2"
+ },
+ "site-id": "4"
+ }
+ },
+ {
+ "device-id": "R4",
+ "service_endpoint_id": "ep-R4-1/3",
+ "service_mapping_info": {
+ "bearer": {
+ "bearer-reference": "R4:1/3"
+ },
+ "site-id": "4"
+ }
+ }
+]
diff --git a/src/webui/service/templates/main/home.html b/src/webui/service/templates/main/home.html
index 4b66463ca15052f85575b573b0cd4e8244f6e516..e0ac9f1fe2f72265e054a6af82d7458eeff8b131 100644
--- a/src/webui/service/templates/main/home.html
+++ b/src/webui/service/templates/main/home.html
@@ -88,7 +88,7 @@
-
+