Commit 4f2b1186 authored by Waleed Akbar's avatar Waleed Akbar
Browse files

Add XML configuration files and tests for DSCM pluggables for ECOC2026 Demo (Proteus-6G)

- Introduced `edit_dscm_hub.xml` and `edit_dscm_leaves.xml` for hub and leaf configurations.
- Added `run_ecoc26_test.sh` script to automate test execution with coverage measurement.
- Implemented `test_ecoc26.py` for end-to-end testing of the DSCM pluggables flow.
- Developed `test_ecoc26_messages.py` for generating HTTP JSON payloads and gRPC proto messages.
- Added `test_ecoc26_xml_capture.py` to render and validate NETCONF XML configurations for hub and leaf pluggables.
parent 3a843d2d
Loading
Loading
Loading
Loading
+223 −0
Original line number Diff line number Diff line
# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Pytest fixtures for the ecoc26 E2E pluggables test.
"""

import logging
import os
import time

import pytest

from common.Constants import ServiceNameEnum
from common.Settings  import (
    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC,
    get_env_var_name, get_service_port_grpc,
)
from common.proto.context_pb2       import Context, DeviceId, Topology
from common.proto.context_pb2_grpc  import add_ContextServiceServicer_to_server
from common.tests.MockServicerImpl_Context import MockServicerImpl_Context
from common.tools.service.GenericGrpcService import GenericGrpcService

from context.client.ContextClient   import ContextClient
from device.client.DeviceClient     import DeviceClient
from device.service.DeviceService   import DeviceService
from device.service.driver_api.DriverFactory      import DriverFactory
from device.service.driver_api.DriverInstanceCache import DriverInstanceCache
from device.service.drivers import DRIVERS

from pluggables.client.PluggablesClient  import PluggablesClient
from pluggables.service.PluggablesService import PluggablesService

from nbi_test_data.MockWebServer    import MockWebServer
from pluggable_test_data.CommonObjects import (
    CONTEXT,
    TOPOLOGY,
    DEVICE_HUB_ID,
    DEVICE_HUB_UUID,
    DEVICE_LEAF_ID,
    DEVICE_LEAF_UUID,
    get_device_hub_with_connect_rules,
    get_device_leaf_with_connect_rules,
)


LOGGER = logging.getLogger(__name__)

LOCAL_HOST       = '127.0.0.1'
MOCKSERVICE_PORT = 10000

# Port assignments (same pattern as PreparePluggablesTestScenario.py)
CONTEXT_SERVICE_PORT   = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.CONTEXT)
DEVICE_SERVICE_PORT    = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.DEVICE)
PLUGGABLE_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.PLUGGABLES)

# Export host/port env vars before any service or client is instantiated,
# so clients resolve the in-process endpoints configured above.
for _service_enum, _grpc_port in (
    (ServiceNameEnum.CONTEXT,    CONTEXT_SERVICE_PORT  ),
    (ServiceNameEnum.DEVICE,     DEVICE_SERVICE_PORT   ),
    (ServiceNameEnum.PLUGGABLES, PLUGGABLE_SERVICE_PORT),
):
    os.environ[get_env_var_name(_service_enum, ENVVAR_SUFIX_SERVICE_HOST     )] = LOCAL_HOST
    os.environ[get_env_var_name(_service_enum, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(_grpc_port)


# ---------------------------------------------------------------------------
# Mock Context Service
# ---------------------------------------------------------------------------

class MockContextService(GenericGrpcService):
    """In-process gRPC server exposing a mock Context servicer for the tests."""

    def __init__(self, bind_port):
        # Loopback-only bind; the health servicer is not needed for unit tests.
        super().__init__(
            bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockContextService')

    def install_servicers(self):
        # Attach the mock Context implementation to the underlying gRPC server.
        self.context_servicer = MockServicerImpl_Context()
        add_ContextServiceServicer_to_server(self.context_servicer, self.server)


@pytest.fixture(scope='session')
def mock_context_service():
    """Start the mock Context gRPC service once for the whole test session."""
    LOGGER.info('Starting MockContextService...')
    service = MockContextService(CONTEXT_SERVICE_PORT)
    service.start()
    yield service
    LOGGER.info('Stopping MockContextService...')
    service.stop()


# ---------------------------------------------------------------------------
# Context client
# ---------------------------------------------------------------------------

@pytest.fixture(scope='session')
def context_client(mock_context_service):  # pylint: disable=redefined-outer-name
    """Provide a ContextClient connected to the mock Context service."""
    LOGGER.info('Creating ContextClient...')
    grpc_client = ContextClient()
    yield grpc_client
    LOGGER.info('Closing ContextClient...')
    grpc_client.close()


# ---------------------------------------------------------------------------
# Device service + client
# ---------------------------------------------------------------------------

@pytest.fixture(scope='session')
def device_service(context_client: ContextClient):  # pylint: disable=redefined-outer-name
    """Run an embedded DeviceService backed by the real driver catalog."""
    LOGGER.info('Starting DeviceService...')
    driver_factory = DriverFactory(DRIVERS)
    driver_cache   = DriverInstanceCache(driver_factory)
    service        = DeviceService(driver_cache)
    service.start()
    yield service
    LOGGER.info('Stopping DeviceService...')
    service.stop()


@pytest.fixture(scope='session')
def device_client(device_service: DeviceService):  # pylint: disable=redefined-outer-name
    """Provide a DeviceClient connected to the embedded DeviceService."""
    LOGGER.info('Creating DeviceClient...')
    grpc_client = DeviceClient()
    yield grpc_client
    LOGGER.info('Closing DeviceClient...')
    grpc_client.close()


# ---------------------------------------------------------------------------
# Pluggables service + client
# ---------------------------------------------------------------------------

@pytest.fixture(scope='session')
def pluggables_service(context_client: ContextClient):  # pylint: disable=redefined-outer-name
    """Run an embedded PluggablesService for the whole test session."""
    LOGGER.info('Starting PluggablesService...')
    service = PluggablesService()
    service.start()
    yield service
    LOGGER.info('Stopping PluggablesService...')
    service.stop()


@pytest.fixture(scope='session')
def pluggables_client(pluggables_service: PluggablesService,
                      context_client: ContextClient,
                      device_client: DeviceClient):
    """Provide a PluggablesClient wired to the embedded PluggablesService."""
    LOGGER.info('Creating PluggablesClient...')
    grpc_client = PluggablesClient()
    yield grpc_client
    LOGGER.info('Closing PluggablesClient...')
    grpc_client.close()


# ---------------------------------------------------------------------------
# NBI REST server
# ---------------------------------------------------------------------------

@pytest.fixture(scope='session')
def nbi_web_server(pluggables_service: PluggablesService):  # pylint: disable=redefined-outer-name
    """Launch the mock NBI REST server once per session (runs as a daemon thread)."""
    LOGGER.info('Starting NBI MockWebServer...')
    web_server = MockWebServer()
    web_server.start()
    time.sleep(1)   # give the Flask server a moment to bind
    yield web_server
    # daemon thread — terminates with the process


# ---------------------------------------------------------------------------
# Environment preparation (autouse)
# ---------------------------------------------------------------------------

@pytest.fixture(scope='session', autouse=True)
def prepare_environment(context_client: ContextClient,
                        device_client: DeviceClient,
                        pluggables_service: PluggablesService,
                        nbi_web_server):
    """Register context, topology, and both test devices before any test runs.

    Autouse session fixture: depending on the client/service fixtures forces
    them to start first; then the mock Context is pre-populated with the admin
    Context/Topology and the hub/leaf test devices. Both devices are read back
    as a sanity check before yielding control to the tests.
    """
    LOGGER.info('Preparing test environment...')

    # Admin Context/Topology must exist before devices can be registered in them.
    context_client.SetContext(Context(**CONTEXT))
    context_client.SetTopology(Topology(**TOPOLOGY))
    LOGGER.info('Created admin Context and Topology')

    hub_device = get_device_hub_with_connect_rules()
    context_client.SetDevice(hub_device)
    # Lazy %-formatting: the message is only rendered if the record is emitted.
    LOGGER.info('Registered Hub device: %s', DEVICE_HUB_UUID)

    leaf_device = get_device_leaf_with_connect_rules()
    context_client.SetDevice(leaf_device)
    LOGGER.info('Registered Leaf device: %s', DEVICE_LEAF_UUID)

    # Sanity-check that Context accepted both devices
    hub_retrieved  = context_client.GetDevice(DeviceId(**DEVICE_HUB_ID))
    assert hub_retrieved.device_id.device_uuid.uuid == DEVICE_HUB_UUID

    leaf_retrieved = context_client.GetDevice(DeviceId(**DEVICE_LEAF_ID))
    assert leaf_retrieved.device_id.device_uuid.uuid == DEVICE_LEAF_UUID

    LOGGER.info('Test environment is ready.')
    yield
    LOGGER.info('Test environment teardown (nothing to clean up for embedded services).')


# ---------------------------------------------------------------------------
# Per-test logging helper
# ---------------------------------------------------------------------------

@pytest.fixture(autouse=True)
def log_each(request):
    """Log a start/end banner around every test for readable test output."""
    # Lazy %-formatting avoids building the string when the level filters it out.
    LOGGER.info('>>>>>> START %s >>>>>>', request.node.name)
    yield
    LOGGER.info('<<<<<< END   %s <<<<<<', request.node.name)
+241 −0
Original line number Diff line number Diff line
#!/bin/bash
# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# ----- TeraFlowSDN ------------------------------------------------------------

# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"

# Set the list of components, separated by spaces, you want to build images for, and deploy.
# export TFS_COMPONENTS="context device pathcomp opticalcontroller service nbi webui"
export TFS_COMPONENTS="context device pathcomp service nbi webui"

# Uncomment to activate Monitoring (old)
#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"

# Uncomment to activate Monitoring Framework (new)
# export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation"

# Uncomment to activate QoS Profiles
#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile"

# Uncomment to activate BGP-LS Speaker
#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"

# Uncomment to activate Optical Controller
#   To manage optical connections, "service" requires "opticalcontroller" to be deployed
#   before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
#   "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it.
#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
#    BEFORE="${TFS_COMPONENTS% service*}"
#    AFTER="${TFS_COMPONENTS#* service}"
#    export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}"
#fi

# Uncomment to activate ZTP
#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"

# Uncomment to activate Policy Manager
# export TFS_COMPONENTS="${TFS_COMPONENTS} policy"

# Uncomment to activate Optical CyberSecurity
#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"

# Uncomment to activate L3 CyberSecurity
#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"

# Uncomment to activate TE
#export TFS_COMPONENTS="${TFS_COMPONENTS} te"

# Uncomment to activate Forecaster
#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster"

# Uncomment to activate E2E Orchestrator
#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator"

# Uncomment to activate VNT Manager
#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager"

# Uncomment to activate OSM Client
#export TFS_COMPONENTS="${TFS_COMPONENTS} osm_client"

# Uncomment to activate DLT and Interdomain
#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt"
#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then
#    export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk"
#    export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem"
#    export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt"
#fi

# Uncomment to activate QKD App
#   To manage QKD Apps, "service" requires "qkd_app" to be deployed
#   before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
#   "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it.
#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
#    BEFORE="${TFS_COMPONENTS% service*}"
#    AFTER="${TFS_COMPONENTS#* service}"
#    export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
#fi

# Uncomment to activate SIMAP Connector
#export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector"

# Uncomment to activate Load Generator
#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator"

# Activate the Pluggables component (required by the ECOC2026 DSCM demo tests).
export TFS_COMPONENTS="${TFS_COMPONENTS} pluggables"


# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"

# Set the name of the Kubernetes namespace to deploy TFS to.
export TFS_K8S_NAMESPACE="tfs"

# Set additional manifest files to be applied after the deployment
export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"

# Uncomment to monitor performance of components
#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"

# Uncomment when deploying Optical CyberSecurity
#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"

# Set the new Grafana admin password
export TFS_GRAFANA_PASSWORD="admin123+"

# Leave empty to disable the skip-build flag, i.e., the Docker images are rebuilt.
export TFS_SKIP_BUILD=""

# ----- CockroachDB ------------------------------------------------------------

# Set the namespace where CockroachDB will be deployed.
export CRDB_NAMESPACE="crdb"

# Set the external port the CockroachDB PostgreSQL-compatible SQL interface will be exposed to.
export CRDB_EXT_PORT_SQL="26257"

# Set the external port the CockroachDB HTTP Mgmt GUI interface will be exposed to.
export CRDB_EXT_PORT_HTTP="8081"

# Set the database username to be used by Context.
export CRDB_USERNAME="tfs"

# Set the database user's password to be used by Context.
export CRDB_PASSWORD="tfs123"

# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
export CRDB_DEPLOY_MODE="single"

# Leave empty to disable dropping the database if it already exists.
export CRDB_DROP_DATABASE_IF_EXISTS=""

# Leave empty to disable re-deploying CockroachDB from scratch.
export CRDB_REDEPLOY=""


# ----- NATS -------------------------------------------------------------------

# Set the namespace where NATS will be deployed.
export NATS_NAMESPACE="nats"

# Set the external port the NATS Client interface will be exposed to.
export NATS_EXT_PORT_CLIENT="4222"

# Set the external port the NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP="8222"

# Set NATS installation mode to 'single'. This option is convenient for development and testing.
# See ./deploy/all.sh or ./deploy/nats.sh for additional details
export NATS_DEPLOY_MODE="single"

# Leave empty to disable re-deploying NATS from scratch.
export NATS_REDEPLOY=""


# ----- Apache Kafka -----------------------------------------------------------

# Set the namespace where Apache Kafka will be deployed.
export KFK_NAMESPACE="kafka"

# Set the port the Apache Kafka server will be exposed to.
export KFK_EXT_PORT_CLIENT="9092"

# Set Kafka installation mode to 'single'. This option is convenient for development and testing.
# See ./deploy/all.sh or ./deploy/kafka.sh for additional details
export KFK_DEPLOY_MODE="single"

# Leave empty to disable re-deploying Kafka from scratch.
export KFK_REDEPLOY=""


# ----- QuestDB ----------------------------------------------------------------

# Set the namespace where QuestDB will be deployed.
export QDB_NAMESPACE="qdb"

# Set the external port the QuestDB PostgreSQL-compatible SQL interface will be exposed to.
export QDB_EXT_PORT_SQL="8812"

# Set the external port the QuestDB Influx Line Protocol interface will be exposed to.
export QDB_EXT_PORT_ILP="9009"

# Set the external port the QuestDB HTTP Mgmt GUI interface will be exposed to.
export QDB_EXT_PORT_HTTP="9000"

# Set the database username to be used for QuestDB.
export QDB_USERNAME="admin"

# Set the database user's password to be used for QuestDB.
export QDB_PASSWORD="quest"

# Set the table name to be used by Monitoring for KPIs.
export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"

# Set the table name to be used by Slice for plotting groups.
export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"

# Leave empty to disable dropping the tables if they already exist.
export QDB_DROP_TABLES_IF_EXIST=""

# Leave empty to disable re-deploying QuestDB from scratch.
export QDB_REDEPLOY=""


# ----- Time Series Storage - Prometheus / Grafana Mimir -----------------------

# Set Time Series Storage installation mode to 'single' (i.e., Prometheus only).
# This option is convenient for development and testing. See ./deploy/all.sh or
# ./deploy/monitoring.sh for additional details.
export TSDB_DEPLOY_MODE="single"


# ----- K8s Observability ------------------------------------------------------

# Set the external port the Prometheus Mgmt HTTP GUI interface will be exposed to.
export PROM_EXT_PORT_HTTP="9090"

# Set the external port the Grafana HTTP Dashboards will be exposed to.
export GRAF_EXT_PORT_HTTP="3000"


# ----- Telemetry Config ------------------------------------------------------

# Define a Load Balancer IP for the Telemetry Collector components.
export LOAD_BALANCER_IP="192.168.5.250" # <-- Change this to match your network
+0 −0

Empty file added.

+0 −0

Empty file added.

+0 −0

Empty file added.

Loading