From f2eda4dc752f3c37dab113fc46026bc1afbe1073 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 14 Jan 2026 15:23:08 +0000 Subject: [PATCH 01/79] End-to-end test - L2 VPN gNMI OpenConfig: - Added ContainerLab scenario and scripts - Added test scripts and code - Added TFS descriptors --- src/tests/l2_vpn_gnmi_oc/.gitignore | 19 ++ src/tests/l2_vpn_gnmi_oc/Dockerfile | 86 ++++++++ src/tests/l2_vpn_gnmi_oc/__init__.py | 14 ++ .../clab/cfg-static-pseudowires/r1-l2-vpn.cfg | 74 +++++++ .../clab/cfg-static-pseudowires/r2-l2-vpn.cfg | 74 +++++++ .../clab/l2_vpn_gnmi_oc.clab.yml | 71 ++++++ src/tests/l2_vpn_gnmi_oc/clab/r1-startup.cfg | 48 ++++ src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg | 48 ++++ .../data/ietf-l2vpn-service.json | 63 ++++++ .../l2_vpn_gnmi_oc/data/tfs-service.json | 15 ++ .../l2_vpn_gnmi_oc/data/tfs-topology.json | 100 +++++++++ .../deploy-scripts/clab-cli-dc1.sh | 16 ++ .../deploy-scripts/clab-cli-dc2.sh | 16 ++ .../deploy-scripts/clab-cli-r1.sh | 16 ++ .../deploy-scripts/clab-cli-r2.sh | 16 ++ .../deploy-scripts/clab-deploy.sh | 17 ++ .../deploy-scripts/clab-destroy.sh | 18 ++ .../deploy-scripts/clab-inspect.sh | 17 ++ src/tests/l2_vpn_gnmi_oc/deploy_specs.sh | 208 ++++++++++++++++++ src/tests/l2_vpn_gnmi_oc/redeploy-tfs.sh | 17 ++ src/tests/l2_vpn_gnmi_oc/requirements.in | 15 ++ .../l2_vpn_gnmi_oc/scripts/run-cleanup.sh | 20 ++ .../l2_vpn_gnmi_oc/scripts/run-onboarding.sh | 20 ++ .../scripts/run-service-ietf-create.sh | 20 ++ .../scripts/run-service-ietf-remove.sh | 20 ++ .../scripts/run-service-tfs-create.sh | 20 ++ .../scripts/run-service-tfs-remove.sh | 20 ++ src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py | 43 ++++ src/tests/l2_vpn_gnmi_oc/tests/Tools.py | 109 +++++++++ src/tests/l2_vpn_gnmi_oc/tests/__init__.py | 14 ++ .../l2_vpn_gnmi_oc/tests/test_cleanup.py | 44 ++++ .../l2_vpn_gnmi_oc/tests/test_onboarding.py | 67 ++++++ .../tests/test_service_ietf_create.py | 71 ++++++ .../tests/test_service_ietf_remove.py | 77 +++++++ .../tests/test_service_tfs_create.py | 76 +++++++ .../tests/test_service_tfs_remove.py | 80 +++++++ 36 files changed, 1669 insertions(+) create mode 100644 src/tests/l2_vpn_gnmi_oc/.gitignore create mode 100644 src/tests/l2_vpn_gnmi_oc/Dockerfile create mode 100644 src/tests/l2_vpn_gnmi_oc/__init__.py create mode 100644 src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r1-l2-vpn.cfg create mode 100644 src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r2-l2-vpn.cfg create mode 100644 src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml create mode 100644 src/tests/l2_vpn_gnmi_oc/clab/r1-startup.cfg create mode 100644 src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg create mode 100644 src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json create mode 100644 src/tests/l2_vpn_gnmi_oc/data/tfs-service.json create mode 100644 src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json create mode 100755 src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc1.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc2.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r1.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r2.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-deploy.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-destroy.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-inspect.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/deploy_specs.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/redeploy-tfs.sh create mode 100644 
src/tests/l2_vpn_gnmi_oc/requirements.in create mode 100755 src/tests/l2_vpn_gnmi_oc/scripts/run-cleanup.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/scripts/run-onboarding.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-create.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-remove.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-create.sh create mode 100755 src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-remove.sh create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/Tools.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/__init__.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/test_cleanup.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/test_onboarding.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py diff --git a/src/tests/l2_vpn_gnmi_oc/.gitignore b/src/tests/l2_vpn_gnmi_oc/.gitignore new file mode 100644 index 000000000..a47dc9eff --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/.gitignore @@ -0,0 +1,19 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +clab-*/ +images/ +*.clab.yml.bak +*.tar +*.tar.gz diff --git a/src/tests/l2_vpn_gnmi_oc/Dockerfile b/src/tests/l2_vpn_gnmi_oc/Dockerfile new file mode 100644 index 000000000..e091adc53 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/Dockerfile @@ -0,0 +1,86 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Get generic Python packages +RUN python3 -m pip install --upgrade 'pip==25.2' +RUN python3 -m pip install --upgrade 'setuptools==79.0.0' 'wheel==0.45.1' +RUN python3 -m pip install --upgrade 'pip-tools==7.3.0' + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/^(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/tests/l2_vpn_gnmi_oc +WORKDIR /var/teraflow/tests/l2_vpn_gnmi_oc +COPY src/tests/l2_vpn_gnmi_oc/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/__init__.py ./__init__.py +COPY src/common/*.py ./common/ +COPY src/common/tests/. ./common/tests/ +COPY src/common/tools/. ./common/tools/ +COPY src/context/__init__.py context/__init__.py +COPY src/context/client/. context/client/ +COPY src/device/__init__.py device/__init__.py +COPY src/device/client/. device/client/ +COPY src/monitoring/__init__.py monitoring/__init__.py +COPY src/monitoring/client/. monitoring/client/ +COPY src/service/__init__.py service/__init__.py +COPY src/service/client/. service/client/ +COPY src/slice/__init__.py slice/__init__.py +COPY src/slice/client/. slice/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. vnt_manager/client/ +COPY src/tests/*.py ./tests/ +COPY src/tests/l2_vpn_gnmi_oc/__init__.py ./tests/l2_vpn_gnmi_oc/__init__.py +COPY src/tests/l2_vpn_gnmi_oc/data/. ./tests/l2_vpn_gnmi_oc/data/ +COPY src/tests/l2_vpn_gnmi_oc/tests/. ./tests/l2_vpn_gnmi_oc/tests/ +COPY src/tests/l2_vpn_gnmi_oc/scripts/. ./ + +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install tree && \ + rm -rf /var/lib/apt/lists/* + +RUN tree -la /var/teraflow diff --git a/src/tests/l2_vpn_gnmi_oc/__init__.py b/src/tests/l2_vpn_gnmi_oc/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r1-l2-vpn.cfg b/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r1-l2-vpn.cfg new file mode 100644 index 000000000..93b91da82 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r1-l2-vpn.cfg @@ -0,0 +1,74 @@ +! Command: show running-config +! device: r1 (cEOSLab, EOS-4.33.5M-43712898.4335M (engineering build)) +! +no aaa root +! +username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70 +! +management api http-commands + no shutdown +! +no service interface inactive port-id allocation disabled +! +transceiver qsfp default-mode 4x10G +! +service routing protocols model multi-agent +! +hostname r1 +! +spanning-tree mode mstp +! +system l1 + unsupported speed action error + unsupported error-correction action error +! +management api gnmi + transport grpc default +! +management api netconf + transport ssh default +! +interface Ethernet2 + no switchport + ip address 10.0.12.1/30 + mpls ldp interface +! +interface Ethernet10 + no switchport +! +interface Loopback0 + ip address 1.1.1.1/32 +! +interface Management0 + ip address 172.20.20.101/24 +! +ip routing +! +ip route 0.0.0.0/0 172.20.20.1 +ip route 2.2.2.2/32 10.0.12.2 +! +mpls ip +! +mpls ldp + router-id interface Loopback0 + no shutdown + ! + pseudowires + pseudowire pw-dc1-dc2 + neighbor 2.2.2.2 + pseudowire-id 100 + mtu 1500 +! +patch panel + patch dc1-pw + connector 1 interface Ethernet10 dot1q vlan 100 + connector 2 pseudowire ldp pw-dc1-dc2 +! +router multicast + ipv4 + software-forwarding kernel + ! + ipv6 + software-forwarding kernel +! +end diff --git a/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r2-l2-vpn.cfg b/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r2-l2-vpn.cfg new file mode 100644 index 000000000..f90737f82 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r2-l2-vpn.cfg @@ -0,0 +1,74 @@ +! Command: show running-config +! device: r2 (cEOSLab, EOS-4.33.5M-43712898.4335M (engineering build)) +! +no aaa root +! +username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70 +! +management api http-commands + no shutdown +! +no service interface inactive port-id allocation disabled +! +transceiver qsfp default-mode 4x10G +! +service routing protocols model multi-agent +! +hostname r2 +! +spanning-tree mode mstp +! +system l1 + unsupported speed action error + unsupported error-correction action error +! +management api gnmi + transport grpc default +! +management api netconf + transport ssh default +! +interface Ethernet1 + no switchport + ip address 10.0.12.2/30 + mpls ldp interface +! +interface Ethernet10 + no switchport +! +interface Loopback0 + ip address 2.2.2.2/32 +! +interface Management0 + ip address 172.20.20.102/24 +! +ip routing +! +ip route 0.0.0.0/0 172.20.20.1 +ip route 1.1.1.1/32 10.0.12.1 +! +mpls ip +! 
+mpls ldp + router-id interface Loopback0 + no shutdown + ! + pseudowires + pseudowire pw-dc1-dc2 + neighbor 1.1.1.1 + pseudowire-id 100 + mtu 1500 +! +patch panel + patch dc2-pw + connector 1 interface Ethernet10 dot1q vlan 100 + connector 2 pseudowire ldp pw-dc1-dc2 +! +router multicast + ipv4 + software-forwarding kernel + ! + ipv6 + software-forwarding kernel +! +end diff --git a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml new file mode 100644 index 000000000..9c69e2b99 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml @@ -0,0 +1,71 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TFS - Arista devices + Linux clients + +name: l2_vpn_gnmi_oc + +mgmt: + network: mgmt-net + ipv4-subnet: 172.20.20.0/24 + +topology: + kinds: + arista_ceos: + kind: arista_ceos + #image: ceos:4.30.4M + #image: ceos:4.31.2F + #image: ceos:4.31.5M # tested, works + #image: ceos:4.32.0F + #image: ceos:4.33.5M + image: ceos:4.34.4M + #image: ceos:4.32.2.1F + #image: ceos:4.33.1F # does not work, libyang.util.LibyangError: failed to parse data tree: No module named "openconfig-platform-healthz" in the context. + linux: + kind: linux + image: ghcr.io/hellt/network-multitool:latest + + nodes: + r1: + kind: arista_ceos + mgmt-ipv4: 172.20.20.101 + startup-config: r1-startup.cfg + + r2: + kind: arista_ceos + mgmt-ipv4: 172.20.20.102 + startup-config: r2-startup.cfg + + dc1: + kind: linux + mgmt-ipv4: 172.20.20.201 + exec: + - ip link set address 00:c1:ab:00:01:0a dev eth1 + - ip link add link eth1 name eth1.100 type vlan id 100 + - ip addr add 172.16.1.10/24 dev eth1.100 + - ip link set eth1.100 up + + dc2: + kind: linux + mgmt-ipv4: 172.20.20.202 + exec: + - ip link set address 00:c1:ab:00:01:14 dev eth1 + - ip link add link eth1 name eth1.100 type vlan id 100 + - ip addr add 172.16.1.20/24 dev eth1.100 + - ip link set eth1.100 up + + links: + - endpoints: ["r1:eth2", "r2:eth1"] + - endpoints: ["r1:eth10", "dc1:eth1"] + - endpoints: ["r2:eth10", "dc2:eth1"] diff --git a/src/tests/l2_vpn_gnmi_oc/clab/r1-startup.cfg b/src/tests/l2_vpn_gnmi_oc/clab/r1-startup.cfg new file mode 100644 index 000000000..712797deb --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/r1-startup.cfg @@ -0,0 +1,48 @@ +! device: r1 (cEOSLab, EOS-4.34.4M) +! +no aaa root +! +username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70 +! +management api http-commands + no shutdown +! +no service interface inactive port-id allocation disabled +! +transceiver qsfp default-mode 4x10G +! +service routing protocols model multi-agent +! +hostname r1 +! +spanning-tree mode mstp +! +system l1 + unsupported speed action error + unsupported error-correction action error +! +management api gnmi + transport grpc default +! +management api netconf + transport ssh default +! 
+interface Ethernet2 +! +interface Ethernet10 +! +interface Management0 + ip address 172.20.20.101/24 +! +ip routing +! +ip route 0.0.0.0/0 172.20.20.1 +! +router multicast + ipv4 + software-forwarding kernel + ! + ipv6 + software-forwarding kernel +! +end diff --git a/src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg b/src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg new file mode 100644 index 000000000..dbba5fbeb --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg @@ -0,0 +1,48 @@ +! device: r2 (cEOSLab, EOS-4.34.4M) +! +no aaa root +! +username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70 +! +management api http-commands + no shutdown +! +no service interface inactive port-id allocation disabled +! +transceiver qsfp default-mode 4x10G +! +service routing protocols model multi-agent +! +hostname r2 +! +spanning-tree mode mstp +! +system l1 + unsupported speed action error + unsupported error-correction action error +! +management api gnmi + transport grpc default +! +management api netconf + transport ssh default +! +interface Ethernet1 +! +interface Ethernet10 +! +interface Management0 + ip address 172.20.20.102/24 +! +ip routing +! +ip route 0.0.0.0/0 172.20.20.1 +! +router multicast + ipv4 + software-forwarding kernel + ! + ipv6 + software-forwarding kernel +! +end diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json new file mode 100644 index 000000000..b649400eb --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json @@ -0,0 +1,63 @@ +{ + "ietf-l2vpn-svc:l2vpn-svc": { + "vpn-services": {"vpn-service": [{"vpn-id": "ietf-l2vpn-svc"}]}, + "sites": { + "site": [ + { + "site-id": "site_DC1", + "management": {"type": "ietf-l2vpn-svc:provider-managed"}, + "locations": {"location": [{"location-id": "DC1"}]}, + "devices": {"device": [{"device-id": "dc1", "location": "DC1"}]}, + "site-network-accesses": { + "site-network-access": [ + { + "site-network-access-id": "eth1", + "site-network-access-type": "ietf-l2vpn-svc:multipoint", + "device-reference": "dc1", + "vpn-attachment": {"vpn-id": "ietf-l2vpn-svc", "site-role": "ietf-l2vpn-svc:spoke-role"}, + "service": { + "svc-mtu": 1500, + "svc-input-bandwidth": 1000000000, + "svc-output-bandwidth": 1000000000, + "qos": {"qos-profile": {"classes": {"class": [{ + "class-id": "qos-realtime", + "direction": "ietf-l2vpn-svc:both", + "latency": {"latency-boundary": 10}, + "bandwidth": {"guaranteed-bw-percent": 100} + }]}}} + } + } + ] + } + }, + { + "site-id": "site_DC2", + "management": {"type": "ietf-l2vpn-svc:provider-managed"}, + "locations": {"location": [{"location-id": "DC2"}]}, + "devices": {"device": [{"device-id": "dc2", "location": "DC2"}]}, + "site-network-accesses": { + "site-network-access": [ + { + "site-network-access-id": "eth1", + "site-network-access-type": "ietf-l2vpn-svc:multipoint", + "device-reference": "dc2", + "vpn-attachment": {"vpn-id": "ietf-l2vpn-svc", "site-role": "ietf-l2vpn-svc:hub-role"}, + "service": { + "svc-mtu": 1500, + "svc-input-bandwidth": 1000000000, + "svc-output-bandwidth": 1000000000, + "qos": {"qos-profile": {"classes": {"class": [{ + "class-id": "qos-realtime", + "direction": "ietf-l2vpn-svc:both", + "latency": {"latency-boundary": 10}, + "bandwidth": {"guaranteed-bw-percent": 100} + }]}}} + } + } + ] + } + } + ] + } + } +} diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service.json 
b/src/tests/l2_vpn_gnmi_oc/data/tfs-service.json new file mode 100644 index 000000000..f7b589e04 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-service.json @@ -0,0 +1,15 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l2vpn-svc"} + }, + "service_type": "SERVICETYPE_L2NM", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}}, + {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}} + ] + } + ] +} diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json new file mode 100644 index 000000000..49df9de42 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json @@ -0,0 +1,100 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "dc1"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "dc2"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "r1"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.101"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "use_tls": false + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "r2"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.102"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "use_tls": false + }}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "r1/Ethernet2==r2/Ethernet1"}}, + "link_endpoint_ids": [ + 
{"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}, + {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "r2/Ethernet1==r1/Ethernet2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}}, + {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "r1/Ethernet10==dc1/eth1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}, + {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "dc1/eth1==r1/Ethernet10"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "eth1"}}, + {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "r2/Ethernet10==dc2/eth1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}, + {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "dc2/eth1==r2/Ethernet10"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "eth1"}}, + {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet10"}} + ] + } + ] +} diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc1.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc1.sh new file mode 100755 index 000000000..94c4d7d51 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc1.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +docker exec -it clab-l2_vpn_gnmi_oc-dc1 bash diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc2.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc2.sh new file mode 100755 index 000000000..9d6e84b1f --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc2.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +docker exec -it clab-l2_vpn_gnmi_oc-dc2 bash diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r1.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r1.sh new file mode 100755 index 000000000..26d39cfcd --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r1.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +docker exec -it clab-l2_vpn_gnmi_oc-r1 Cli diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r2.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r2.sh new file mode 100755 index 000000000..e6ee51ec9 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r2.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +docker exec -it clab-l2_vpn_gnmi_oc-r2 Cli diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-deploy.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-deploy.sh new file mode 100755 index 000000000..294f680b2 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-deploy.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc +sudo containerlab deploy --topo clab/l2_vpn_gnmi_oc.clab.yml diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-destroy.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-destroy.sh new file mode 100755 index 000000000..68f7c30b1 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-destroy.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc +sudo containerlab destroy --topo clab/l2_vpn_gnmi_oc.clab.yml +sudo rm -rf clab/clab-l2_vpn_gnmi_oc/ clab/.l2_vpn_gnmi_oc.clab.yml.bak diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-inspect.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-inspect.sh new file mode 100755 index 000000000..2a8325f59 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-inspect.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc +sudo containerlab inspect --topo clab/l2_vpn_gnmi_oc.clab.yml diff --git a/src/tests/l2_vpn_gnmi_oc/deploy_specs.sh b/src/tests/l2_vpn_gnmi_oc/deploy_specs.sh new file mode 100755 index 000000000..72cd25b58 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy_specs.sh @@ -0,0 +1,208 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry to which the images will be uploaded. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, that you want to build images for and deploy. 
+#export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator" +export TFS_COMPONENTS="context device pathcomp service nbi" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. 
+export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroachDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroachDB PostgreSQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details. +export CRDB_DEPLOY_MODE="single" + +# Enable flag for dropping the database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details. +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB PostgreSQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Enable flag for dropping tables, if they exist. +export QDB_DROP_TABLES_IF_EXIST="YES" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. 
+export KFK_SERVER_PORT="9092" + +# Set the flag to YES to redeploy Apache Kafka. +export KFK_REDEPLOY="" diff --git a/src/tests/l2_vpn_gnmi_oc/redeploy-tfs.sh b/src/tests/l2_vpn_gnmi_oc/redeploy-tfs.sh new file mode 100755 index 000000000..d3e80c108 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/redeploy-tfs.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/deploy_specs.sh +./deploy/all.sh diff --git a/src/tests/l2_vpn_gnmi_oc/requirements.in b/src/tests/l2_vpn_gnmi_oc/requirements.in new file mode 100644 index 000000000..5c92783a2 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/requirements.in @@ -0,0 +1,15 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +requests==2.27.* diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-cleanup.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-cleanup.sh new file mode 100755 index 000000000..0bdae7627 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-cleanup.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_cleanup.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_cleanup.py diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-onboarding.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-onboarding.sh new file mode 100755 index 000000000..36b2ab292 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-onboarding.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_onboarding.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_onboarding.py diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-create.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-create.sh new file mode 100755 index 000000000..4c60250bb --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-create.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_service_ietf_create.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-remove.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-remove.sh new file mode 100755 index 000000000..02fabe7d7 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-remove.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_service_ietf_remove.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-create.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-create.sh new file mode 100755 index 000000000..91dc1f2aa --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-create.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_service_tfs_create.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-remove.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-remove.sh new file mode 100755 index 000000000..d170a6ef4 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-remove.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_service_tfs_remove.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py diff --git a/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py b/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py new file mode 100644 index 000000000..5997e58c8 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py @@ -0,0 +1,43 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from monitoring.client.MonitoringClient import MonitoringClient +from service.client.ServiceClient import ServiceClient + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def monitoring_client(): + _client = MonitoringClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def service_client(): + _client = ServiceClient() + yield _client + _client.close() diff --git a/src/tests/l2_vpn_gnmi_oc/tests/Tools.py b/src/tests/l2_vpn_gnmi_oc/tests/Tools.py new file mode 100644 index 000000000..bbee845cd --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/Tools.py @@ -0,0 +1,109 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, logging, requests +from typing import Any, Dict, List, Optional, Set, Union +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_http + +NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI) +NBI_PORT = get_service_port_http(ServiceNameEnum.NBI) +NBI_USERNAME = 'admin' +NBI_PASSWORD = 'admin' +NBI_BASE_URL = '' + +class RestRequestMethod(enum.Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + PATCH = 'patch' + DELETE = 'delete' + +EXPECTED_STATUS_CODES : Set[int] = { + requests.codes['OK' ], + requests.codes['CREATED' ], + requests.codes['ACCEPTED' ], + requests.codes['NO_CONTENT'], +} + +def do_rest_request( + method : RestRequestMethod, url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + request_url = 'http://{:s}:{:s}@{:s}:{:d}{:s}{:s}'.format( + NBI_USERNAME, NBI_PASSWORD, NBI_ADDRESS, NBI_PORT, str(NBI_BASE_URL), url + ) + + if logger is not None: + msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) + if body is not None: msg += ' body={:s}'.format(str(body)) + logger.warning(msg) + reply = requests.request(method.value, request_url, timeout=timeout, json=body, allow_redirects=allow_redirects) + if logger is not None: + logger.warning('Reply: {:s}'.format(str(reply.text))) + assert reply.status_code in expected_status_codes, 'Reply failed with status code {:d}'.format(reply.status_code) + + if reply.content and len(reply.content) > 0: return reply.json() + return None + +def do_rest_get_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + 
RestRequestMethod.GET, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_post_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.POST, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_put_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.PUT, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_patch_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.PATCH, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_delete_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.DELETE, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/__init__.py b/src/tests/l2_vpn_gnmi_oc/tests/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_cleanup.py b/src/tests/l2_vpn_gnmi_oc/tests/test_cleanup.py new file mode 100644 index 000000000..20afb5fe0 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_cleanup.py @@ -0,0 +1,44 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, os +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from .Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_cleanup( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_onboarding.py b/src/tests/l2_vpn_gnmi_oc/tests/test_onboarding.py new file mode 100644 index 000000000..763d7da17 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_onboarding.py @@ -0,0 +1,67 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
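+
+# Scenario onboarding tests:
+# - test_scenario_onboarding: loads data/tfs-topology.json through the DescriptorLoader,
+#   checks the load results, and verifies that no services or slices exist yet.
+# - test_scenario_devices_enabled: polls ListDevices (up to 10 retries, 1 second apart)
+#   until every onboarded device reports operational status ENABLED.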
+ +import logging, os, time +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from .Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_onboarding( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + validate_empty_scenario(context_client) + + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + +def test_scenario_devices_enabled( + context_client : ContextClient, # pylint: disable=redefined-outer-name +) -> None: + """ + This test validates that the devices are enabled. + """ + DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + + num_devices = -1 + num_devices_enabled, num_retry = 0, 0 + while (num_devices != num_devices_enabled) and (num_retry < 10): + time.sleep(1.0) + response = context_client.ListDevices(Empty()) + num_devices = len(response.devices) + num_devices_enabled = 0 + for device in response.devices: + if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue + num_devices_enabled += 1 + LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) + num_retry += 1 + assert num_devices_enabled == num_devices diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py new file mode 100644 index 000000000..92f52c688 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py @@ -0,0 +1,71 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
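+
+# IETF L2VPN service creation test:
+# - POSTs data/ietf-l2vpn-service.json to the NBI RESTCONF endpoint
+#   /restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services (expects HTTP 201),
+#   then GETs the created vpn-service to obtain the TFS service UUID.
+# - Verifies through the Context component that exactly one ACTIVE L2NM service
+#   with a single connection has been created and that no slices exist.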
+ +import json, logging, os +from typing import Dict +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from .Fixtures import context_client # pylint: disable=unused-import +from .Tools import do_rest_get_request, do_rest_post_request + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l2vpn-service.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + + +# pylint: disable=redefined-outer-name, unused-argument +def test_service_ietf_creation( + context_client : ContextClient, +): + # Issue service creation request + with open(REQUEST_FILE, 'r', encoding='UTF-8') as f: + svc1_data = json.load(f) + URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services' + do_rest_post_request(URL, body=svc1_data, logger=LOGGER, expected_status_codes={201}) + vpn_id = svc1_data['ietf-l2vpn-svc:l2vpn-svc']['vpn-services']['vpn-service'][0]['vpn-id'] + + URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={:s}/'.format(vpn_id) + service_data = do_rest_get_request(URL, logger=LOGGER, expected_status_codes={200}) + service_uuid = service_data['service-id'] + + # Verify service was created + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format( + len(response.services), grpc_message_to_json_string(response) + )) + assert len(response.services) == 1 + + for service in response.services: + service_id = service.service_id + assert service_id.service_uuid.uuid == service_uuid + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response) + )) + assert len(response.connections) == 1 diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py new file mode 100644 index 000000000..f08dae2af --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py @@ -0,0 +1,77 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
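+
+# IETF L2VPN service removal test:
+# - Verifies that exactly one ACTIVE L2NM service (with a single connection) exists.
+# - Issues a RESTCONF DELETE on the corresponding vpn-service (expects HTTP 204) and
+#   checks that the context is left without services or slices.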
+ +import logging, os +from typing import Dict, Set, Tuple +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from .Fixtures import context_client # pylint: disable=unused-import +from .Tools import do_rest_delete_request + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l2vpn-service.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + + +# pylint: disable=redefined-outer-name, unused-argument +def test_service_ietf_removal( + context_client : ContextClient, # pylint: disable=redefined-outer-name +): + # Verify the scenario has 1 service and 0 slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there are no slices + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 1 + + service_uuids : Set[str] = set() + for service in response.services: + service_id = service.service_id + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response) + )) + assert len(response.connections) == 1 + + service_uuids.add(service_id.service_uuid.uuid) + + # Identify service to delete + assert len(service_uuids) == 1 + service_uuid = set(service_uuids).pop() + + URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={:s}/'.format(service_uuid) + do_rest_delete_request(URL, logger=LOGGER, expected_status_codes={204}) + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py new file mode 100644 index 000000000..87c0f7909 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py @@ -0,0 +1,76 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
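+
+# TFS-native L2VPN service creation test:
+# - Loads data/tfs-service.json through the DescriptorLoader using the Context, Device
+#   and Service clients, and checks the descriptor load results.
+# - Verifies that exactly one ACTIVE L2NM service with a single connection has been
+#   created and that no slices exist.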
+ +import logging, os +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from .Fixtures import context_client, device_client, service_client # pylint: disable=unused-import + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-service.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + + +def test_service_tfs_creation( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient, # pylint: disable=redefined-outer-name +): + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, + device_client=device_client, service_client=service_client + ) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + + # Verify the scenario has 1 service and 0 slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there are no slices + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.warning('Slices[{:d}] = {:s}'.format( + len(response.slices), grpc_message_to_json_string(response) + )) + assert len(response.slices) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format( + len(response.services), grpc_message_to_json_string(response) + )) + assert len(response.services) == 1 + + for service in response.services: + service_id = service.service_id + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response) + )) + assert len(response.connections) == 1 diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py new file mode 100644 index 000000000..15236da94 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py @@ -0,0 +1,80 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, os +from typing import Set, Tuple +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Service import json_service_id +from context.client.ContextClient import ContextClient +from service.client.ServiceClient import ServiceClient +from .Fixtures import context_client, service_client # pylint: disable=unused-import + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-service.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + + +def test_service_tfs_removal( + context_client : ContextClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient, # pylint: disable=redefined-outer-name +): + # Verify the scenario has 1 service and 0 slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there are no slices + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 1 + + context_service_uuids : Set[Tuple[str, str]] = set() + for service in response.services: + service_id = service.service_id + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response) + )) + assert len(response.connections) == 1 + + context_uuid = service_id.context_id.context_uuid.uuid + service_uuid = service_id.service_uuid.uuid + context_service_uuids.add((context_uuid, service_uuid)) + + # Identify service to delete + assert len(context_service_uuids) == 1 + context_uuid, service_uuid = set(context_service_uuids).pop() + + # Delete Service + service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid)))) + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 -- GitLab From 777d7dfff235983ae29e41d577ca473a8f56221c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 14 Jan 2026 15:25:05 +0000 Subject: [PATCH 02/79] Device component - gNMI OpenConfig Driver: - Implemented handlers for MPLS management - Implemented Network Instance handlers for Connection Point, Endpoint, Vlan - Added load of MPLS models --- .../drivers/gnmi_openconfig/handlers/Mpls.py | 121 ++++++++++++++++++ .../NetworkInstanceConnectionPoint.py | 57 +++++++++ .../handlers/NetworkInstanceEndpoint.py | 89 +++++++++++++ .../handlers/NetworkInstanceVlan.py | 66 
++++++++++ .../gnmi_openconfig/handlers/YangHandler.py | 2 + .../gnmi_openconfig/handlers/__init__.py | 29 +++++ 6 files changed, 364 insertions(+) create mode 100644 src/device/service/drivers/gnmi_openconfig/handlers/Mpls.py create mode 100644 src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceConnectionPoint.py create mode 100644 src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceEndpoint.py create mode 100644 src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceVlan.py diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/Mpls.py b/src/device/service/drivers/gnmi_openconfig/handlers/Mpls.py new file mode 100644 index 000000000..cadf35ce8 --- /dev/null +++ b/src/device/service/drivers/gnmi_openconfig/handlers/Mpls.py @@ -0,0 +1,121 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, re +from typing import Any, Dict, List, Tuple +from ._Handler import _Handler +from .Tools import get_int, get_str +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +RE_MPLS_INTERFACE = re.compile(r'^/mpls/interface\[([^\]]+)\]$') +DEFAULT_NETWORK_INSTANCE = 'default' + +class MplsHandler(_Handler): + def get_resource_key(self) -> str: return '/mpls' + def get_path(self) -> str: + return '/openconfig-network-instance:network-instances/network-instance/mpls' + + def compose( + self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False + ) -> Tuple[str, str]: + """ + Compose MPLS (global or per-interface) configuration. + - Global: set LDP router-id (lsr-id) and optional hello timers. + - Interface: set LDP interface-id and optional hello timers. 
+ """ + ni_name = get_str(resource_value, 'network_instance', DEFAULT_NETWORK_INSTANCE) + ni_type = get_str(resource_value, 'network_instance_type') + if ni_type is None and ni_name == DEFAULT_NETWORK_INSTANCE: + ni_type = 'openconfig-network-instance-types:DEFAULT_INSTANCE' + + yang_nis : Any = yang_handler.get_data_path('/openconfig-network-instance:network-instances') + yang_ni : Any = yang_nis.create_path('network-instance[name="{:s}"]'.format(ni_name)) + yang_ni.create_path('config/name', ni_name) + if ni_type is not None: + yang_ni.create_path('config/type', ni_type) + + match_if = RE_MPLS_INTERFACE.match(resource_key) + if delete: + if match_if: + if_name = match_if.group(1) + str_path = ( + '/network-instances/network-instance[name={:s}]/mpls/signaling-protocols/ldp' + '/interface-attributes/interfaces/interface[interface-id={:s}]' + ).format(ni_name, if_name) + else: + str_path = '/network-instances/network-instance[name={:s}]/mpls'.format(ni_name) + return str_path, json.dumps({}) + + if match_if: + if_name = match_if.group(1) + hello_interval = get_int(resource_value, 'hello_interval') + hello_holdtime = get_int(resource_value, 'hello_holdtime') + + path_if_base = ( + 'mpls/signaling-protocols/ldp/interface-attributes/interfaces' + '/interface[interface-id="{:s}"]/config' + ).format(if_name) + yang_ni.create_path('{:s}/interface-id'.format(path_if_base), if_name) + if hello_interval is not None: + yang_ni.create_path('{:s}/hello-interval'.format(path_if_base), hello_interval) + if hello_holdtime is not None: + yang_ni.create_path('{:s}/hello-holdtime'.format(path_if_base), hello_holdtime) + + yang_if : Any = yang_ni.find_path( + 'mpls/signaling-protocols/ldp/interface-attributes/interfaces' + '/interface[interface-id="{:s}"]'.format(if_name) + ) + + str_path = ( + '/network-instances/network-instance[name={:s}]/mpls/signaling-protocols/ldp' + '/interface-attributes/interfaces/interface[interface-id={:s}]' + ).format(ni_name, if_name) + json_data = json.loads(yang_if.print_mem('json')) + json_data = json_data['openconfig-network-instance:interface'][0] + str_data = json.dumps(json_data) + return str_path, str_data + + # Global LDP configuration + ldp_cfg = resource_value.get('ldp', resource_value) + lsr_id = get_str(ldp_cfg, 'lsr_id') + hello_interval = get_int(ldp_cfg, 'hello_interval') + hello_holdtime = get_int(ldp_cfg, 'hello_holdtime') + + if lsr_id is not None: + yang_ni.create_path('mpls/signaling-protocols/ldp/global/config/lsr-id', lsr_id) + if hello_interval is not None: + yang_ni.create_path( + 'mpls/signaling-protocols/ldp/interface-attributes/config/hello-interval', hello_interval + ) + if hello_holdtime is not None: + yang_ni.create_path( + 'mpls/signaling-protocols/ldp/interface-attributes/config/hello-holdtime', hello_holdtime + ) + + yang_ldp : Any = yang_ni.find_path('mpls/signaling-protocols/ldp') + + str_path = '/network-instances/network-instance[name={:s}]/mpls/signaling-protocols/ldp'.format(ni_name) + json_data = json.loads(yang_ldp.print_mem('json')) + json_data = json_data['openconfig-network-instance:ldp'] + str_data = json.dumps(json_data) + return str_path, str_data + + def parse( + self, json_data : Dict, yang_handler : YangHandler + ) -> List[Tuple[str, Dict[str, Any]]]: + LOGGER.debug('[parse] json_data = %s', json.dumps(json_data)) + # Not required for current tests (L2VPN validation focuses on SetConfig). 
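+        # If needed later, parsing would translate the retrieved JSON into resource tuples
+        # such as ('/mpls/interface[<if-name>]', {...}), matching the keys composed above.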
+ return [] diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceConnectionPoint.py b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceConnectionPoint.py new file mode 100644 index 000000000..3ff259c5d --- /dev/null +++ b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceConnectionPoint.py @@ -0,0 +1,57 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import Any, Dict, List, Tuple +from ._Handler import _Handler +from .Tools import get_str +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +class NetworkInstanceConnectionPointHandler(_Handler): + def get_resource_key(self) -> str: return '/network_instance/connection_point' + def get_path(self) -> str: + return '/openconfig-network-instance:network-instances/network-instance/connection-points/connection-point' + + def compose( + self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False + ) -> Tuple[str, str]: + ni_name = get_str(resource_value, 'name') + cp_id = get_str(resource_value, 'connection_point_id') + + str_path = ( + '/network-instances/network-instance[name={:s}]/connection-points' + '/connection-point[connection-point-id={:s}]' + ).format(ni_name, cp_id) + if delete: + return str_path, json.dumps({}) + + yang_nis : Any = yang_handler.get_data_path('/openconfig-network-instance:network-instances') + path_cp_base = ( + 'network-instance[name="{:s}"]/connection-points' + '/connection-point[connection-point-id="{:s}"]' + ).format(ni_name, cp_id) + yang_nis.create_path('{:s}/config/connection-point-id'.format(path_cp_base), cp_id) + + yang_cp : Any = yang_nis.find_path(path_cp_base) + json_data = json.loads(yang_cp.print_mem('json')) + json_data = json_data['openconfig-network-instance:connection-point'][0] + return str_path, json.dumps(json_data) + + def parse( + self, json_data : Dict, yang_handler : YangHandler + ) -> List[Tuple[str, Dict[str, Any]]]: + LOGGER.debug('[parse] json_data = %s', json.dumps(json_data)) + return [] diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceEndpoint.py b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceEndpoint.py new file mode 100644 index 000000000..81ffe524e --- /dev/null +++ b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceEndpoint.py @@ -0,0 +1,89 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import Any, Dict, List, Tuple +from ._Handler import _Handler +from .Tools import get_int, get_str +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +class NetworkInstanceEndpointHandler(_Handler): + def get_resource_key(self) -> str: return '/network_instance/connection_point/endpoint' + def get_path(self) -> str: + return '/openconfig-network-instance:network-instances/network-instance/connection-points/connection-point/endpoints/endpoint' + + def compose( + self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False + ) -> Tuple[str, str]: + ni_name = get_str(resource_value, 'name') + cp_id = get_str(resource_value, 'connection_point_id') + ep_id = get_str(resource_value, 'endpoint_id') + ep_type = get_str(resource_value, 'type') + precedence = get_int(resource_value, 'precedence') + + str_path = ( + '/network-instances/network-instance[name={:s}]/connection-points/connection-point' + '[connection-point-id={:s}]/endpoints/endpoint[endpoint-id={:s}]' + ).format(ni_name, cp_id, ep_id) + if delete: + return str_path, json.dumps({}) + + if ep_type is not None and ':' not in ep_type: + ep_type = 'openconfig-network-instance-types:{:s}'.format(ep_type) + + yang_nis : Any = yang_handler.get_data_path('/openconfig-network-instance:network-instances') + path_ep_base = ( + 'network-instance[name="{:s}"]/connection-points/connection-point[connection-point-id="{:s}"]' + '/endpoints/endpoint[endpoint-id="{:s}"]' + ).format(ni_name, cp_id, ep_id) + yang_nis.create_path('{:s}/config/endpoint-id'.format(path_ep_base), ep_id) + if ep_type is not None: + yang_nis.create_path('{:s}/config/type'.format(path_ep_base), ep_type) + if precedence is not None: + yang_nis.create_path('{:s}/config/precedence'.format(path_ep_base), precedence) + + if ep_type and ep_type.endswith('LOCAL'): + if_name = get_str(resource_value, 'interface') + sif_index = get_int(resource_value, 'subinterface', 0) + if if_name is not None: + yang_nis.create_path('{:s}/local/config/interface'.format(path_ep_base), if_name) + yang_nis.create_path('{:s}/local/config/subinterface'.format(path_ep_base), sif_index) + site_id = get_int(resource_value, 'site_id') + if site_id is not None: + yang_nis.create_path('{:s}/local/config/site-id'.format(path_ep_base), site_id) + elif ep_type and ep_type.endswith('REMOTE'): + remote_system = get_str(resource_value, 'remote_system') + vc_id = get_int(resource_value, 'virtual_circuit_id') + if remote_system is not None: + yang_nis.create_path('{:s}/remote/config/remote-system'.format(path_ep_base), remote_system) + if vc_id is not None: + yang_nis.create_path( + '{:s}/remote/config/virtual-circuit-identifier'.format(path_ep_base), vc_id + ) + site_id = get_int(resource_value, 'site_id') + if site_id is not None: + yang_nis.create_path('{:s}/remote/config/site-id'.format(path_ep_base), site_id) + + yang_ep : Any = yang_nis.find_path(path_ep_base) + json_data = json.loads(yang_ep.print_mem('json')) + json_data = json_data['openconfig-network-instance:endpoint'][0] + return str_path, json.dumps(json_data) + + def parse( + self, json_data : Dict, yang_handler : YangHandler + ) -> List[Tuple[str, Dict[str, Any]]]: + LOGGER.debug('[parse] json_data = %s', json.dumps(json_data)) + return [] diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceVlan.py 
b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceVlan.py new file mode 100644 index 000000000..a9e3fb009 --- /dev/null +++ b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceVlan.py @@ -0,0 +1,66 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import Any, Dict, List, Tuple, Union +from ._Handler import _Handler +from .Tools import get_int, get_str +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +class NetworkInstanceVlanHandler(_Handler): + def get_resource_key(self) -> str: return '/network_instance/vlan' + def get_path(self) -> str: + return '/openconfig-network-instance:network-instances/network-instance/vlans/vlan' + + def compose( + self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False + ) -> Tuple[str, str]: + ni_name = get_str(resource_value, 'name', 'default') + vlan_id = get_int(resource_value, 'vlan_id') + vlan_name = get_str(resource_value, 'vlan_name') + str_path = '/network-instances/network-instance[name={:s}]/vlans/vlan[vlan-id={:d}]'.format( + ni_name, vlan_id + ) + if delete: + yang_nis : Any = yang_handler.get_data_path('/openconfig-network-instance:network-instances') + yang_vlan = yang_nis.find_path('network-instance[name="{:s}"]/vlans/vlan[vlan-id="{:d}"]'.format( + ni_name, vlan_id)) + if yang_vlan is not None: + yang_vlan.unlink() + yang_vlan.free() + return str_path, json.dumps({}) + + yang_nis : Any = yang_handler.get_data_path('/openconfig-network-instance:network-instances') + yang_ni : Any = yang_nis.create_path('network-instance[name="{:s}"]'.format(ni_name)) + yang_ni.create_path('config/name', ni_name) + if ni_name == 'default': + yang_ni.create_path('config/type', 'openconfig-network-instance-types:DEFAULT_INSTANCE') + + yang_vlans : Any = yang_ni.create_path('vlans') + yang_vlan : Any = yang_vlans.create_path('vlan[vlan-id="{:d}"]'.format(vlan_id)) + yang_vlan.create_path('config/vlan-id', vlan_id) + if vlan_name is not None: + yang_vlan.create_path('config/name', vlan_name) + + json_data = json.loads(yang_vlan.print_mem('json')) + json_data = json_data['openconfig-network-instance:vlan'][0] + return str_path, json.dumps(json_data) + + def parse( + self, json_data : Dict, yang_handler : YangHandler + ) -> List[Tuple[str, Dict[str, Any]]]: + LOGGER.debug('[parse] json_data = %s', json.dumps(json_data)) + return [] diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py b/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py index 5e1ea3b43..684e09efb 100644 --- a/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py +++ b/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py @@ -42,6 +42,8 @@ YANG_MODULES = [ 'openconfig-types', 'openconfig-policy-types', 'openconfig-mpls-types', + 'openconfig-mpls', + 'openconfig-mpls-ldp', 'openconfig-network-instance-types', 
'openconfig-network-instance', 'openconfig-acl', diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py b/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py index 3ce655353..61ce7e675 100644 --- a/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py +++ b/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py @@ -23,6 +23,10 @@ from .NetworkInstance import NetworkInstanceHandler from .NetworkInstanceInterface import NetworkInstanceInterfaceHandler from .NetworkInstanceProtocol import NetworkInstanceProtocolHandler from .NetworkInstanceStaticRoute import NetworkInstanceStaticRouteHandler +from .NetworkInstanceConnectionPoint import NetworkInstanceConnectionPointHandler +from .NetworkInstanceEndpoint import NetworkInstanceEndpointHandler +from .NetworkInstanceVlan import NetworkInstanceVlanHandler +from .Mpls import MplsHandler from .Acl import AclHandler from .Tools import get_schema from .YangHandler import YangHandler @@ -36,6 +40,10 @@ nih = NetworkInstanceHandler() niifh = NetworkInstanceInterfaceHandler() niph = NetworkInstanceProtocolHandler() nisrh = NetworkInstanceStaticRouteHandler() +nicph = NetworkInstanceConnectionPointHandler() +nieph = NetworkInstanceEndpointHandler() +nivlh = NetworkInstanceVlanHandler() +mplsh = MplsHandler() aclh = AclHandler() ALL_RESOURCE_KEYS = [ @@ -49,6 +57,10 @@ RESOURCE_KEY_MAPPER = { RESOURCE_ENDPOINTS : comph.get_resource_key(), RESOURCE_INTERFACES : ifaceh.get_resource_key(), RESOURCE_NETWORK_INSTANCES : nih.get_resource_key(), + '/interface' : ifaceh.get_resource_key(), + '/mpls' : mplsh.get_resource_key(), + '/network_instance/vlan' : nivlh.get_resource_key(), + '/mpls/interface' : mplsh.get_resource_key(), RESOURCE_ACL : aclh.get_resource_key(), } @@ -57,6 +69,15 @@ PATH_MAPPER = { '/components/component' : comph.get_path(), '/interfaces' : ifaceh.get_path(), '/network-instances' : nih.get_path(), + '/network-instances/network-instance/connection-points/connection-point' + : nicph.get_path(), + '/network-instances/network-instance/connection-points/connection-point/endpoints/endpoint' + : nieph.get_path(), + '/network-instances/network-instance/vlans/vlan' + : nivlh.get_path(), + '/mpls' : mplsh.get_path(), + '/network-instances/network-instance/mpls' + : mplsh.get_path(), '/acl' : aclh.get_path(), } @@ -68,6 +89,10 @@ RESOURCE_KEY_TO_HANDLER = { niifh.get_resource_key() : niifh, niph.get_resource_key() : niph, nisrh.get_resource_key() : nisrh, + nicph.get_resource_key() : nicph, + nieph.get_resource_key() : nieph, + nivlh.get_resource_key() : nivlh, + mplsh.get_resource_key() : mplsh, aclh.get_resource_key() : aclh, } @@ -79,6 +104,10 @@ PATH_TO_HANDLER = { niifh.get_path() : niifh, niph.get_path() : niph, nisrh.get_path() : nisrh, + nicph.get_path() : nicph, + nieph.get_path() : nieph, + nivlh.get_path() : nivlh, + mplsh.get_path() : mplsh, aclh.get_path() : aclh, } -- GitLab From 6ddf5dc24079519cd5051167ee020d4ac16fe0fb Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 14 Jan 2026 15:28:12 +0000 Subject: [PATCH 03/79] Device component - gNMI OpenConfig Driver: - Added test code for L2 VPN configuration --- .../tests/gnmi_openconfig/Dockerfile.l2vpn | 85 +++ .../test_unitary_gnmi_oc_arista_l2vpn.py | 665 ++++-------------- .../tools/request_composers.py | 85 ++- 3 files changed, 289 insertions(+), 546 deletions(-) create mode 100644 src/device/tests/gnmi_openconfig/Dockerfile.l2vpn diff --git a/src/device/tests/gnmi_openconfig/Dockerfile.l2vpn 
b/src/device/tests/gnmi_openconfig/Dockerfile.l2vpn new file mode 100644 index 000000000..2270ed3ed --- /dev/null +++ b/src/device/tests/gnmi_openconfig/Dockerfile.l2vpn @@ -0,0 +1,85 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Minimal reuse of device component Dockerfile steps to run L2VPN tests +FROM python:3.9-slim + +# Base deps + libyang build +RUN apt-get update -qq && apt-get install -y -qq wget g++ git build-essential cmake libpcre2-dev python3-dev python3-cffi && rm -rf /var/lib/apt/lists/* +RUN mkdir -p /var/libyang && git clone https://github.com/CESNET/libyang.git /var/libyang +WORKDIR /var/libyang +RUN git fetch && git checkout v2.1.148 && mkdir -p build +WORKDIR /var/libyang/build +RUN cmake -D CMAKE_BUILD_TYPE:String="Release" .. && make && make install && ldconfig + +ENV PYTHONUNBUFFERED=0 + +# Python toolchain +RUN python3 -m pip install --upgrade 'pip==25.2' +RUN python3 -m pip install --upgrade 'setuptools==79.0.0' 'wheel==0.45.1' +RUN python3 -m pip install --upgrade 'pip-tools==7.3.0' + +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Common files + proto +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/^(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Device deps +WORKDIR /var/teraflow/device +COPY src/device/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Source tree +WORKDIR /var/teraflow +COPY src/__init__.py ./__init__.py +COPY src/common/*.py ./common/ +COPY src/common/tests/. ./common/tests/ +COPY src/common/tools/. ./common/tools/ +COPY src/context/__init__.py context/__init__.py +COPY src/context/client/. context/client/ +COPY src/device/. device/ +COPY src/monitoring/__init__.py monitoring/__init__.py +COPY src/monitoring/client/. monitoring/client/ +COPY src/service/__init__.py service/__init__.py +COPY src/service/client/. service/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. 
vnt_manager/client/ + +# OpenConfig models as in device Dockerfile +RUN mkdir -p /tmp/openconfig && git clone https://github.com/openconfig/public.git /tmp/openconfig +WORKDIR /tmp/openconfig +RUN git fetch && git checkout v4.4.0 +RUN rm -rf /var/teraflow/device/service/drivers/gnmi_openconfig/git +RUN mkdir -p /var/teraflow/device/service/drivers/gnmi_openconfig/git/openconfig/public +RUN mv /tmp/openconfig/release /var/teraflow/device/service/drivers/gnmi_openconfig/git/openconfig/public +RUN mv /tmp/openconfig/third_party /var/teraflow/device/service/drivers/gnmi_openconfig/git/openconfig/public +RUN rm -rf /tmp/openconfig +WORKDIR /var/teraflow + +ENV RUN_L2VPN_LAB=1 +ENV PYTHONPATH=/var/teraflow +CMD ["pytest", "--log-level=DEBUG", "--verbose", "device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py"] diff --git a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py index f5bee3c12..4c73e0889 100644 --- a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py +++ b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py @@ -12,565 +12,144 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, logging, os, pytest, time -from typing import Dict, Tuple -os.environ['DEVICE_EMULATED_ONLY'] = 'YES' +""" +Integration validation of GnmiOpenConfigDriver for L2VPN (VPLS) over MPLS/LDP +using the ContainerLab dataplane (dc1--r1--r2--dc2). +""" -# pylint: disable=wrong-import-position +import grpc, logging, os, pytest, time +from typing import Dict, List, Tuple from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver -#from device.service.driver_api._Driver import ( -# RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES -#) - -logging.basicConfig(level=logging.DEBUG) -#logging.getLogger('ncclient.operations.rpc').setLevel(logging.INFO) -#logging.getLogger('ncclient.transport.parser').setLevel(logging.INFO) - -LOGGER = logging.getLogger(__name__) - - -##### DRIVERS FIXTURE ################################################################################################## - -DEVICES = { - 'SW1': {'address': '172.20.20.101', 'port': 6030, 'settings': { - 'username': 'admin', 'password': 'admin', - 'vendor': None, 'force_running': False, 'hostkey_verify': False, 'look_for_keys': False, 'allow_agent': False, - 'commit_per_rule': True, 'device_params': {'name': 'default'}, 'manager_params': {'timeout' : 120} - }}, - 'SW2': {'address': '10.1.1.87', 'port': 830, 'settings': { - 'username': 'ocnos', 'password': 'ocnos', - 'vendor': None, 'force_running': False, 'hostkey_verify': False, 'look_for_keys': False, 'allow_agent': False, - 'commit_per_rule': True, 'device_params': {'name': 'default'}, 'manager_params': {'timeout' : 120} - }}, -} - -@pytest.fixture(scope='session') -def drivers() -> Dict[str, OpenConfigDriver]: - _drivers : Dict[str, OpenConfigDriver] = dict() - for device_name, driver_params in DEVICES.items(): - driver = OpenConfigDriver(driver_params['address'], driver_params['port'], **(driver_params['settings'])) - driver.Connect() - _drivers[device_name] = driver - yield _drivers - time.sleep(1) - for _,driver in _drivers.items(): - driver.Disconnect() - - -def network_instance(ni_name, ni_type, ni_router_id=None, ni_route_distinguisher=None) -> Tuple[str, Dict]: - path = 
'/network_instance[{:s}]'.format(ni_name) - data = {'name': ni_name, 'type': ni_type} - if ni_router_id is not None: data['router_id'] = ni_router_id - if ni_route_distinguisher is not None: data['route_distinguisher'] = ni_route_distinguisher - return path, json.dumps(data) - -def network_instance_add_protocol_bgp(ni_name, ni_type, ni_router_id, ni_bgp_as, neighbors=[]) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/protocols[BGP]'.format(ni_name) - data = { - 'name': ni_name, 'type': ni_type, 'router_id': ni_router_id, 'identifier': 'BGP', - 'protocol_name': ni_bgp_as, 'as': ni_bgp_as - } - if len(neighbors) > 0: - data['neighbors'] = [ - {'ip_address': neighbor_ip_address, 'remote_as': neighbor_remote_as} - for neighbor_ip_address, neighbor_remote_as in neighbors - ] - return path, json.dumps(data) - -def network_instance_add_protocol_direct(ni_name, ni_type) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/protocols[DIRECTLY_CONNECTED]'.format(ni_name) - data = { - 'name': ni_name, 'type': ni_type, 'identifier': 'DIRECTLY_CONNECTED', - 'protocol_name': 'DIRECTLY_CONNECTED' - } - return path, json.dumps(data) - -def network_instance_add_protocol_static(ni_name, ni_type) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/protocols[STATIC]'.format(ni_name) - data = { - 'name': ni_name, 'type': ni_type, 'identifier': 'STATIC', - 'protocol_name': 'STATIC' - } - return path, json.dumps(data) - -#def network_instance_static_route(ni_name, prefix, next_hop, next_hop_index=0) -> Tuple[str, Dict]: -# path = '/network_instance[{:s}]/static_route[{:s}]'.format(ni_name, prefix) -# data = {'name': ni_name, 'prefix': prefix, 'next_hop': next_hop, 'next_hop_index': next_hop_index} -# return path, json.dumps(data) - -def network_instance_add_table_connection( - ni_name, src_protocol, dst_protocol, address_family, default_import_policy, bgp_as=None -) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/table_connections[{:s}][{:s}][{:s}]'.format( - ni_name, src_protocol, dst_protocol, address_family - ) - data = { - 'name': ni_name, 'src_protocol': src_protocol, 'dst_protocol': dst_protocol, - 'address_family': address_family, 'default_import_policy': default_import_policy, - } - if bgp_as is not None: data['as'] = bgp_as - return path, json.dumps(data) - -def interface( - name, index, description=None, if_type=None, vlan_id=None, mtu=None, ipv4_address_prefix=None, enabled=None -) -> Tuple[str, Dict]: - path = '/interface[{:s}]/subinterface[{:d}]'.format(name, index) - data = {'name': name, 'index': index} - if description is not None: data['description'] = description - if if_type is not None: data['type' ] = if_type - if vlan_id is not None: data['vlan_id' ] = vlan_id - if mtu is not None: data['mtu' ] = mtu - if enabled is not None: data['enabled' ] = enabled - if ipv4_address_prefix is not None: - ipv4_address, ipv4_prefix = ipv4_address_prefix - data['address_ip' ] = ipv4_address - data['address_prefix'] = ipv4_prefix - return path, json.dumps(data) - -def network_instance_interface(ni_name, ni_type, if_name, if_index) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/interface[{:s}.{:d}]'.format(ni_name, if_name, if_index) - data = {'name': ni_name, 'type': ni_type, 'id': if_name, 'interface': if_name, 'subinterface': if_index} - return path, json.dumps(data) - -def test_configure(drivers : Dict[str, OpenConfigDriver]): - #resources_to_get = [] - #resources_to_get = [RESOURCE_ENDPOINTS] - #resources_to_get = [RESOURCE_INTERFACES] - #resources_to_get = 
[RESOURCE_NETWORK_INSTANCES] - #resources_to_get = [RESOURCE_ROUTING_POLICIES] - #resources_to_get = [RESOURCE_SERVICES] - #LOGGER.info('resources_to_get = {:s}'.format(str(resources_to_get))) - #results_getconfig = driver.GetConfig(resources_to_get) - #LOGGER.info('results_getconfig = {:s}'.format(str(results_getconfig))) - - csgw1_resources_to_set = [ - network_instance('ecoc24', 'L3VRF', '192.168.150.1', '65001:1'), - network_instance_add_protocol_direct('ecoc24', 'L3VRF'), - network_instance_add_protocol_static('ecoc24', 'L3VRF'), - network_instance_add_protocol_bgp('ecoc24', 'L3VRF', '192.168.150.1', '65001', neighbors=[ - ('192.168.150.2', '65001') - ]), - network_instance_add_table_connection('ecoc24', 'DIRECTLY_CONNECTED', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), - network_instance_add_table_connection('ecoc24', 'STATIC', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), - - interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500), - network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), - interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.10.1', 24), enabled=True), - - interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500), - network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), - interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.150.1', 24), enabled=True), - ] - LOGGER.info('CSGW1 resources_to_set = {:s}'.format(str(csgw1_resources_to_set))) - results_setconfig = drivers['CSGW1'].SetConfig(csgw1_resources_to_set) - LOGGER.info('CSGW1 results_setconfig = {:s}'.format(str(results_setconfig))) - - csgw2_resources_to_set = [ - network_instance('ecoc24', 'L3VRF', '192.168.150.2', '65001:1'), - network_instance_add_protocol_direct('ecoc24', 'L3VRF'), - network_instance_add_protocol_static('ecoc24', 'L3VRF'), - network_instance_add_protocol_bgp('ecoc24', 'L3VRF', '192.168.150.2', '65001', neighbors=[ - ('192.168.150.1', '65001') - ]), - network_instance_add_table_connection('ecoc24', 'DIRECTLY_CONNECTED', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), - network_instance_add_table_connection('ecoc24', 'STATIC', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), - - interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500), - network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), - interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.20.1', 24), enabled=True), - - interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500), - network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), - interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.150.2', 24), enabled=True), - ] - LOGGER.info('CSGW2 resources_to_set = {:s}'.format(str(csgw2_resources_to_set))) - results_setconfig = drivers['CSGW2'].SetConfig(csgw2_resources_to_set) - LOGGER.info('CSGW2 results_setconfig = {:s}'.format(str(results_setconfig))) - - csgw1_resources_to_delete = [ - network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), - network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), - #interface('ce1', 0), - #interface('xe5', 0), - network_instance('ecoc24', 'L3VRF'), - ] - LOGGER.info('CSGW1 resources_to_delete = {:s}'.format(str(csgw1_resources_to_delete))) - results_deleteconfig = drivers['CSGW1'].DeleteConfig(csgw1_resources_to_delete) - LOGGER.info('CSGW1 results_deleteconfig = {:s}'.format(str(results_deleteconfig))) - - csgw2_resources_to_delete = [ - network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), - network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), - 
#interface('ce1', 0), - #interface('xe5', 0), - network_instance('ecoc24', 'L3VRF'), - ] - LOGGER.info('CSGW2 resources_to_delete = {:s}'.format(str(csgw2_resources_to_delete))) - results_deleteconfig = drivers['CSGW2'].DeleteConfig(csgw2_resources_to_delete) - LOGGER.info('CSGW2 results_deleteconfig = {:s}'.format(str(results_deleteconfig))) - - - - - - -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -os.environ['DEVICE_EMULATED_ONLY'] = 'YES' - -# pylint: disable=wrong-import-position -import logging, pytest, time -from typing import Dict, List -from device.service.driver_api._Driver import ( - RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, - RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES -) -from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver -from .storage.Storage import Storage -from .tools.manage_config import ( - check_config_endpoints, check_config_interfaces, check_config_network_instances, del_config, get_config, set_config -) -from .tools.check_updates import check_updates -from .tools.request_composers import ( - interface, network_instance, network_instance_interface, network_instance_static_route +from device.tests.gnmi_openconfig.tools.request_composers import ( + connection_point, connection_point_endpoint_local, connection_point_endpoint_remote, + interface, mpls_global, mpls_ldp_interface, network_instance, vlan, ) -logging.basicConfig(level=logging.DEBUG) +logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - - -##### DRIVER FIXTURE ################################################################################################### -DRIVER_SETTING_ADDRESS = '172.20.20.101' -DRIVER_SETTING_PORT = 6030 -DRIVER_SETTING_USERNAME = 'admin' -DRIVER_SETTING_PASSWORD = 'admin' -DRIVER_SETTING_USE_TLS = False - -@pytest.fixture(scope='session') -def driver() -> GnmiOpenConfigDriver: - _driver = GnmiOpenConfigDriver( - DRIVER_SETTING_ADDRESS, DRIVER_SETTING_PORT, - username=DRIVER_SETTING_USERNAME, - password=DRIVER_SETTING_PASSWORD, - use_tls=DRIVER_SETTING_USE_TLS, - ) - _driver.Connect() - yield _driver - time.sleep(1) - _driver.Disconnect() - - -##### STORAGE FIXTURE ################################################################################################## - -@pytest.fixture(scope='session') -def storage() -> Dict: - yield Storage() +# Skip unless the lab is explicitly enabled +RUN_LAB = os.environ.get('RUN_L2VPN_LAB', '0') == '1' +pytestmark = pytest.mark.skipif(not RUN_LAB, reason='Requires running ContainerLab L2VPN dataplane') +GNMI_PORT = 6030 +USERNAME = 'admin' +PASSWORD = 'admin' -##### NETWORK INSTANCE DETAILS ######################################################################################### +SERVICE_NAME = 'tfs-l2vpn-vpls' +VC_ID = 100 +VLAN_ID = 100 -NETWORK_INSTANCES = [ +ROUTERS = [ { - 'name': 'test-l3-svc', - 'type': 'L3VRF', - 'interfaces': [ - 
{'name': 'Ethernet1', 'index': 0, 'ipv4_addr': '192.168.1.1', 'ipv4_prefix': 24, 'enabled': True}, - {'name': 'Ethernet10', 'index': 0, 'ipv4_addr': '192.168.10.1', 'ipv4_prefix': 24, 'enabled': True}, - ], - 'static_routes': [ - {'prefix': '172.0.0.0/24', 'next_hop': '172.16.0.2', 'metric': 1}, - {'prefix': '172.2.0.0/24', 'next_hop': '172.16.0.3', 'metric': 1}, - ] + 'name' : 'r1', + 'address' : '172.20.20.101', + 'ldp_router_id' : '172.20.20.101', + 'core_interface' : 'Ethernet2', + 'access_interface': 'Ethernet10', + 'peer' : '172.20.20.102', + }, + { + 'name' : 'r2', + 'address' : '172.20.20.102', + 'ldp_router_id' : '172.20.20.102', + 'core_interface' : 'Ethernet1', + 'access_interface': 'Ethernet10', + 'peer' : '172.20.20.101', }, - #{ - # 'name': 'test-l2-svc', - # 'type': 'L2VSI', - # 'interfaces': [ - # {'name': 'Ethernet2', 'index': 0, 'ipv4_addr': '192.168.1.1', 'ipv4_prefix': 24, 'enabled': True}, - # {'name': 'Ethernet4', 'index': 0, 'ipv4_addr': '192.168.10.1', 'ipv4_prefix': 24, 'enabled': True}, - # ], - # 'static_routes': [ - # {'prefix': '172.0.0.0/24', 'next_hop': '172.16.0.2', 'metric': 1}, - # {'prefix': '172.2.0.0/24', 'next_hop': '172.16.0.3', 'metric': 1}, - # ] - #} ] -##### TEST METHODS ##################################################################################################### - -def test_get_endpoints( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - results_getconfig = get_config(driver, [RESOURCE_ENDPOINTS]) - storage.endpoints.populate(results_getconfig) - check_config_endpoints(driver, storage) - - -def test_get_interfaces( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - results_getconfig = get_config(driver, [RESOURCE_INTERFACES]) - storage.interfaces.populate(results_getconfig) - check_config_interfaces(driver, storage) - - -def test_get_network_instances( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - results_getconfig = get_config(driver, [RESOURCE_NETWORK_INSTANCES]) - storage.network_instances.populate(results_getconfig) - check_config_network_instances(driver, storage) - - -def test_set_network_instances( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - check_config_network_instances(driver, storage) - - resources_to_set = list() - ni_names = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - ni_type = ni['type'] - resources_to_set.append(network_instance(ni_name, ni_type)) - ni_names.append(ni_name) - storage.network_instances.network_instances.add(ni_name, {'type': ni_type}) - storage.network_instances.protocols.add(ni_name, 'DIRECTLY_CONNECTED') - storage.network_instances.tables.add(ni_name, 'DIRECTLY_CONNECTED', 'IPV4') - storage.network_instances.tables.add(ni_name, 'DIRECTLY_CONNECTED', 'IPV6') - - results_setconfig = set_config(driver, resources_to_set) - check_updates(results_setconfig, '/network_instance[{:s}]', ni_names) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_add_interfaces_to_network_instance( - driver : GnmiOpenConfigDriver, # pylint: 
disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - check_config_network_instances(driver, storage) - - resources_to_set = list() - ni_if_names = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - for ni_if in ni.get('interfaces', list()): - if_name = ni_if['name' ] - subif_index = ni_if['index'] - resources_to_set.append(network_instance_interface(ni_name, if_name, subif_index)) - ni_if_names.append((ni_name, '{:s}.{:d}'.format(if_name, subif_index))) - storage.network_instances.interfaces.add(ni_name, if_name, subif_index) - - results_setconfig = set_config(driver, resources_to_set) - check_updates(results_setconfig, '/network_instance[{:s}]/interface[{:s}]', ni_if_names) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_set_interfaces( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - check_config_network_instances(driver, storage) - - resources_to_set = list() - if_names = list() - for ni in NETWORK_INSTANCES: - for ni_if in ni.get('interfaces', list()): - if_name = ni_if['name' ] - subif_index = ni_if['index' ] - ipv4_address = ni_if['ipv4_addr' ] - ipv4_prefix = ni_if['ipv4_prefix'] - enabled = ni_if['enabled' ] - resources_to_set.append(interface( - if_name, subif_index, ipv4_address, ipv4_prefix, enabled - )) - if_names.append(if_name) - storage.interfaces.ipv4_addresses.add(if_name, subif_index, ipv4_address, { - 'origin' : 'STATIC', 'prefix': ipv4_prefix - }) - default_vlan = storage.network_instances.vlans.get('default', 1) - default_vlan_members : List[str] = default_vlan.setdefault('members', list()) - if if_name in default_vlan_members: default_vlan_members.remove(if_name) - - results_setconfig = set_config(driver, resources_to_set) - check_updates(results_setconfig, '/interface[{:s}]', if_names) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_set_network_instance_static_routes( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - check_config_network_instances(driver, storage) - - resources_to_set = list() - ni_sr_prefixes = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - for ni_sr in ni.get('static_routes', list()): - ni_sr_prefix = ni_sr['prefix' ] - ni_sr_next_hop = ni_sr['next_hop'] - ni_sr_metric = ni_sr['metric' ] - ni_sr_next_hop_index = 'AUTO_{:d}_{:s}'.format(ni_sr_metric, '-'.join(ni_sr_next_hop.split('.'))) - resources_to_set.append(network_instance_static_route( - ni_name, ni_sr_prefix, ni_sr_next_hop_index, ni_sr_next_hop, metric=ni_sr_metric - )) - ni_sr_prefixes.append((ni_name, ni_sr_prefix)) - storage.network_instances.protocols.add(ni_name, 'STATIC') - storage.network_instances.protocol_static.add(ni_name, 'STATIC', ni_sr_prefix, { - 'prefix': ni_sr_prefix, 'next_hops': { - ni_sr_next_hop_index: {'next_hop': ni_sr_next_hop, 'metric': ni_sr_metric} - } - }) - storage.network_instances.tables.add(ni_name, 'STATIC', 'IPV4') - storage.network_instances.tables.add(ni_name, 'STATIC', 'IPV6') - - results_setconfig = 
set_config(driver, resources_to_set) - check_updates(results_setconfig, '/network_instance[{:s}]/static_route[{:s}]', ni_sr_prefixes) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_del_network_instance_static_routes( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - check_config_network_instances(driver, storage) - - resources_to_delete = list() - ni_sr_prefixes = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - for ni_sr in ni.get('static_routes', list()): - ni_sr_prefix = ni_sr['prefix' ] - ni_sr_next_hop = ni_sr['next_hop'] - ni_sr_metric = ni_sr['metric' ] - ni_sr_next_hop_index = 'AUTO_{:d}_{:s}'.format(ni_sr_metric, '-'.join(ni_sr_next_hop.split('.'))) - resources_to_delete.append(network_instance_static_route( - ni_name, ni_sr_prefix, ni_sr_next_hop_index, ni_sr_next_hop, metric=ni_sr_metric - )) - ni_sr_prefixes.append((ni_name, ni_sr_prefix)) - - storage.network_instances.protocols.remove(ni_name, 'STATIC') - storage.network_instances.protocol_static.remove(ni_name, 'STATIC', ni_sr_prefix) - storage.network_instances.tables.remove(ni_name, 'STATIC', 'IPV4') - storage.network_instances.tables.remove(ni_name, 'STATIC', 'IPV6') - - results_deleteconfig = del_config(driver, resources_to_delete) - check_updates(results_deleteconfig, '/network_instance[{:s}]/static_route[{:s}]', ni_sr_prefixes) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_del_interfaces( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - #check_config_network_instances(driver, storage) - - resources_to_delete = list() - if_names = list() - for ni in NETWORK_INSTANCES: - for ni_if in ni.get('interfaces', list()): - if_name = ni_if['name' ] - subif_index = ni_if['index' ] - ipv4_address = ni_if['ipv4_addr' ] - ipv4_prefix = ni_if['ipv4_prefix'] - enabled = ni_if['enabled' ] - resources_to_delete.append(interface(if_name, subif_index, ipv4_address, ipv4_prefix, enabled)) - if_names.append(if_name) - storage.interfaces.ipv4_addresses.remove(if_name, subif_index, ipv4_address) - default_vlan = storage.network_instances.vlans.get('default', 1) - default_vlan_members : List[str] = default_vlan.setdefault('members', list()) - if if_name not in default_vlan_members: default_vlan_members.append(if_name) - - results_deleteconfig = del_config(driver, resources_to_delete) - check_updates(results_deleteconfig, '/interface[{:s}]', if_names) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_del_interfaces_from_network_instance( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - #check_config_network_instances(driver, storage) - - resources_to_delete = list() - ni_if_names = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - for ni_if in ni.get('interfaces', list()): - if_name = ni_if['name' ] - subif_index = ni_if['index'] - 
resources_to_delete.append(network_instance_interface(ni_name, if_name, subif_index)) - ni_if_names.append((ni_name, '{:s}.{:d}'.format(if_name, subif_index))) - storage.network_instances.interfaces.remove(ni_name, if_name, subif_index) - - results_deleteconfig = del_config(driver, resources_to_delete) - check_updates(results_deleteconfig, '/network_instance[{:s}]/interface[{:s}]', ni_if_names) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) +def _build_l2vpn_resources(router: Dict[str, str]) -> Tuple[List[Tuple[str, Dict]], List[Tuple[str, Dict]]]: + set_resources : List[Tuple[str, Dict]] = [ + network_instance(SERVICE_NAME, 'L2VSI'), + connection_point(SERVICE_NAME, 'access'), + connection_point_endpoint_local( + SERVICE_NAME, 'access', 'access-ep', router['access_interface'], subif=0, precedence=0 + ), + connection_point(SERVICE_NAME, 'core'), + connection_point_endpoint_remote( + SERVICE_NAME, 'core', 'core-ep', router['peer'], vc_id=VC_ID, precedence=100 + ), + ] + del_resources = list(reversed(set_resources)) + return set_resources, del_resources + +def _set_with_retry(driver: GnmiOpenConfigDriver, resources: List[Tuple[str, Dict]], attempts: int = 5, wait_s: int = 5): + """Retry SetConfig while the device reports it is not yet initialized.""" + last_exc = None + for i in range(attempts): + try: + return driver.SetConfig(resources) + except grpc.RpcError as exc: + last_exc = exc + if exc.code() == grpc.StatusCode.UNAVAILABLE and 'system not yet initialized' in exc.details(): + LOGGER.info('Device not ready (attempt %s/%s), waiting %ss', i + 1, attempts, wait_s) + time.sleep(wait_s) + continue + raise + if last_exc: + raise last_exc + return [] -def test_del_network_instances( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - #check_config_network_instances(driver, storage) - resources_to_delete = list() - ni_names = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - ni_type = ni['type'] - resources_to_delete.append(network_instance(ni_name, ni_type)) - ni_names.append(ni_name) - storage.network_instances.network_instances.remove(ni_name) - storage.network_instances.protocols.remove(ni_name, 'DIRECTLY_CONNECTED') - storage.network_instances.tables.remove(ni_name, 'DIRECTLY_CONNECTED', 'IPV4') - storage.network_instances.tables.remove(ni_name, 'DIRECTLY_CONNECTED', 'IPV6') +@pytest.fixture(scope='session') +def drivers() -> Dict[str, GnmiOpenConfigDriver]: + _drivers : Dict[str, GnmiOpenConfigDriver] = dict() + for router in ROUTERS: + driver = GnmiOpenConfigDriver( + router['address'], GNMI_PORT, username=USERNAME, password=PASSWORD, use_tls=False + ) + try: + driver.Connect() + except Exception as exc: # pylint: disable=broad-except + pytest.skip(f"Cannot connect to {router['name']} ({router['address']}): {exc}") + _drivers[router['name']] = driver + yield _drivers + time.sleep(1) + for _, driver in _drivers.items(): + driver.Disconnect() - results_deleteconfig = del_config(driver, resources_to_delete) - check_updates(results_deleteconfig, '/network_instance[{:s}]', ni_names) - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) +def test_configure_mpls_ldp(drivers: Dict[str, GnmiOpenConfigDriver]) -> None: + """Enable 
LDP globally and on the r1<->r2 core links.""" + for router in ROUTERS: + driver = drivers[router['name']] + resources = [ + mpls_global(router['ldp_router_id'], hello_interval=5, hello_holdtime=15), + mpls_ldp_interface(router['core_interface'], hello_interval=5, hello_holdtime=15), + ] + LOGGER.info('Configuring MPLS/LDP on %s (%s)', router['name'], router['address']) + results = _set_with_retry(driver, resources) + LOGGER.info('MPLS/LDP result: %s', results) + assert all( + (result is True) or (isinstance(result, tuple) and len(result) > 1 and result[1] is True) + for result in results + ) + + +def test_configure_l2vpn_vpls(drivers: Dict[str, GnmiOpenConfigDriver]) -> None: + """Fallback validation: create a VLAN in default VRF and attach core/access interfaces.""" + for router in ROUTERS: + driver = drivers[router['name']] + vlan_res = vlan('default', VLAN_ID, members=[], vlan_name='tfs-vlan') + if_access = interface(router['access_interface'], VLAN_ID, enabled=True, vlan_id=VLAN_ID, + ipv4_address=None, ipv4_prefix=None) + if_core = interface(router['core_interface'], VLAN_ID, enabled=True, vlan_id=VLAN_ID, + ipv4_address=None, ipv4_prefix=None) + + LOGGER.info('Configuring VLAN %s on %s (%s)', VLAN_ID, router['name'], router['address']) + results_vlan = _set_with_retry(driver, [vlan_res, if_access, if_core]) + LOGGER.info('VLAN result: %s', results_vlan) + assert all( + (result is True) or (isinstance(result, tuple) and len(result) > 1 and result[1] is True) + for result in results_vlan + ) + + LOGGER.info('Tearing down VLAN %s on %s (%s)', VLAN_ID, router['name'], router['address']) + results_del = driver.DeleteConfig([if_core, if_access, vlan_res]) + assert all( + (result is True) or (isinstance(result, tuple) and len(result) > 1 and result[1] is True) + for result in results_del + ) diff --git a/src/device/tests/gnmi_openconfig/tools/request_composers.py b/src/device/tests/gnmi_openconfig/tools/request_composers.py index 0a8aefe24..198291d73 100644 --- a/src/device/tests/gnmi_openconfig/tools/request_composers.py +++ b/src/device/tests/gnmi_openconfig/tools/request_composers.py @@ -14,11 +14,20 @@ from typing import Dict, Tuple -def interface(if_name, sif_index, ipv4_address, ipv4_prefix, enabled) -> Tuple[str, Dict]: +def interface(if_name, sif_index, ipv4_address=None, ipv4_prefix=None, enabled=True, vlan_id=None) -> Tuple[str, Dict]: str_path = '/interface[{:s}]'.format(if_name) str_data = { - 'name': if_name, 'enabled': enabled, 'sub_if_index': sif_index, 'sub_if_enabled': enabled, - 'sub_if_ipv4_enabled': enabled, 'sub_if_ipv4_address': ipv4_address, 'sub_if_ipv4_prefix': ipv4_prefix + 'name': if_name, + 'enabled': enabled, + 'index': sif_index, + 'sub_if_index': sif_index, + 'sub_if_enabled': enabled, + 'sub_if_ipv4_enabled': enabled, + 'sub_if_ipv4_address': ipv4_address, + 'sub_if_ipv4_prefix': ipv4_prefix, + 'address_ip': ipv4_address, + 'address_prefix': ipv4_prefix, + 'vlan_id': vlan_id, } return str_path, str_data @@ -42,3 +51,73 @@ def network_instance_interface(ni_name, if_name, sif_index) -> Tuple[str, Dict]: 'name': ni_name, 'if_name': if_name, 'sif_index': sif_index } return str_path, str_data + +def mpls_global(ldp_router_id: str, hello_interval: int = None, hello_holdtime: int = None) -> Tuple[str, Dict]: + str_path = '/mpls' + str_data = { + 'ldp': { + 'lsr_id': ldp_router_id, + 'hello_interval': hello_interval, + 'hello_holdtime': hello_holdtime, + } + } + return str_path, str_data + +def mpls_ldp_interface(if_name: str, hello_interval: int = None, 
hello_holdtime: int = None) -> Tuple[str, Dict]: + str_path = '/mpls/interface[{:s}]'.format(if_name) + str_data = { + 'interface': if_name, + 'hello_interval': hello_interval, + 'hello_holdtime': hello_holdtime, + } + return str_path, str_data + +def connection_point(ni_name: str, cp_id: str) -> Tuple[str, Dict]: + str_path = '/network_instance[{:s}]/connection_point[{:s}]'.format(ni_name, cp_id) + str_data = {'name': ni_name, 'connection_point_id': cp_id} + return str_path, str_data + +def connection_point_endpoint_local( + ni_name: str, cp_id: str, ep_id: str, if_name: str, subif: int = 0, precedence: int = 0, site_id: int = None +) -> Tuple[str, Dict]: + str_path = '/network_instance[{:s}]/connection_point[{:s}]/endpoint[{:s}]'.format(ni_name, cp_id, ep_id) + str_data = { + 'name': ni_name, + 'connection_point_id': cp_id, + 'endpoint_id': ep_id, + 'type': 'LOCAL', + 'precedence': precedence, + 'interface': if_name, + 'subinterface': subif, + 'site_id': site_id, + } + return str_path, str_data + +def connection_point_endpoint_remote( + ni_name: str, cp_id: str, ep_id: str, remote_system: str, vc_id: int, + precedence: int = 0, site_id: int = None +) -> Tuple[str, Dict]: + str_path = '/network_instance[{:s}]/connection_point[{:s}]/endpoint[{:s}]'.format(ni_name, cp_id, ep_id) + str_data = { + 'name': ni_name, + 'connection_point_id': cp_id, + 'endpoint_id': ep_id, + 'type': 'REMOTE', + 'precedence': precedence, + 'remote_system': remote_system, + 'virtual_circuit_id': vc_id, + 'site_id': site_id, + } + return str_path, str_data + +def vlan(ni_name: str, vlan_id: int, members=None, vlan_name: str = None) -> Tuple[str, Dict]: + if members is None: + members = [] + str_path = '/network_instance[{:s}]/vlan[{:d}]'.format(ni_name, vlan_id) + str_data = { + 'name': ni_name, + 'vlan_id': vlan_id, + 'vlan_name': vlan_name, + 'members': members, + } + return str_path, str_data -- GitLab From 6d0079b0daf29a9d0e078a7b42ab9cc190bf520b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 14 Jan 2026 15:39:32 +0000 Subject: [PATCH 04/79] End-to-end test - L2 VPN gNMI OpenConfig: - Added README.md - Added GitLab CI descriptor --- src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml | 324 ++++++++++++++++++++++++ src/tests/l2_vpn_gnmi_oc/README.md | 120 +++++++++ 2 files changed, 444 insertions(+) create mode 100644 src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml create mode 100644 src/tests/l2_vpn_gnmi_oc/README.md diff --git a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml new file mode 100644 index 000000000..2655cee03 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml @@ -0,0 +1,324 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Build, tag, and push the Docker image to the GitLab Docker registry +build l2_vpn_gnmi_oc: + variables: + TEST_NAME: 'l2_vpn_gnmi_oc' + stage: build + before_script: + - docker image prune --force + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - docker buildx build -t "${TEST_NAME}:latest" -f ./src/tests/${TEST_NAME}/Dockerfile . + - docker tag "${TEST_NAME}:latest" "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest" + - docker push "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest" + after_script: + - docker image prune --force + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/tests/${TEST_NAME}/**/*.{py,in,sh,yml} + - src/tests/${TEST_NAME}/Dockerfile + - .gitlab-ci.yml + +# Deploy TeraFlowSDN and Execute end-2-end test +end2end_test l2_vpn_gnmi_oc: + timeout: 45m + variables: + TEST_NAME: 'l2_vpn_gnmi_oc' + stage: end2end_test + # Disable to force running it after all other tasks + #needs: + # - build l2_vpn_gnmi_oc + before_script: + # Cleanup old ContainerLab scenarios + - containerlab destroy --all --cleanup || true + + # Do Docker cleanup + - docker ps --all --quiet | xargs --no-run-if-empty docker stop + - docker container prune --force + - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force + - docker image prune --force + - docker network prune --force + - docker volume prune --all --force + - docker buildx prune --force + + # Check MicroK8s is ready + - microk8s status --wait-ready + - LOOP_MAX_ATTEMPTS=10 + - LOOP_COUNTER=0 + - > + while ! kubectl get pods --all-namespaces &> /dev/null; do + printf "%c" "." + sleep 1 + LOOP_COUNTER=$((LOOP_COUNTER + 1)) + if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then + echo "Max attempts reached, exiting the loop." + exit 1 + fi + done + - kubectl get pods --all-namespaces + + # Always delete Kubernetes namespaces + - export K8S_NAMESPACES=$(kubectl get namespace -o jsonpath='{.items[*].metadata.name}') + - echo "K8S_NAMESPACES=${K8S_NAMESPACES}" + + - export OLD_NATS_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^nats') + - echo "OLD_NATS_NAMESPACES=${OLD_NATS_NAMESPACES}" + - > + for ns in ${OLD_NATS_NAMESPACES}; do + if [[ "$ns" == nats* ]]; then + if helm3 status "$ns" &>/dev/null; then + helm3 uninstall "$ns" -n "$ns" + else + echo "Release '$ns' not found, skipping..." + fi + fi + done + - export OLD_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^(tfs|crdb|qdb|kafka|nats)') + - echo "OLD_NAMESPACES=${OLD_NAMESPACES}" + - kubectl delete namespace ${OLD_NAMESPACES} || true + + # Clean-up Kubernetes Failed pods + - > + kubectl get pods --all-namespaces --no-headers --field-selector=status.phase=Failed + -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name | + xargs --no-run-if-empty --max-args=2 kubectl delete pod --namespace + + # Login Docker repository + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + + script: + # Download Docker image to run the test + - docker pull "${CI_REGISTRY_IMAGE}/${TEST_NAME}:latest" + + # Check MicroK8s is ready + - microk8s status --wait-ready + - LOOP_MAX_ATTEMPTS=10 + - LOOP_COUNTER=0 + - > + while ! kubectl get pods --all-namespaces &> /dev/null; do + printf "%c" "." 
+ sleep 1 + LOOP_COUNTER=$((LOOP_COUNTER + 1)) + if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then + echo "Max attempts reached, exiting the loop." + exit 1 + fi + done + - kubectl get pods --all-namespaces + + # Deploy ContainerLab Scenario + - RUNNER_PATH=`pwd` + #- cd $PWD/src/tests/${TEST_NAME} + - mkdir -p /tmp/clab/${TEST_NAME} + - cp -R src/tests/${TEST_NAME}/clab/* /tmp/clab/${TEST_NAME} + - tree -la /tmp/clab/${TEST_NAME} + - cd /tmp/clab/${TEST_NAME} + - containerlab deploy --reconfigure --topo ${TEST_NAME}.clab.yml + - cd $RUNNER_PATH + + # Wait for initialization of Device NOSes + - sleep 3 + - docker ps -a + + # Dump configuration of the routers (before any configuration) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Configure TeraFlowSDN deployment + # Uncomment if DEBUG log level is needed for the components + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml + + - source src/tests/${TEST_NAME}/deploy_specs.sh + #- export TFS_REGISTRY_IMAGES="${CI_REGISTRY_IMAGE}" + #- export TFS_SKIP_BUILD="YES" + #- export TFS_IMAGE_TAG="latest" + #- echo "TFS_REGISTRY_IMAGES=${CI_REGISTRY_IMAGE}" + + # Deploy TeraFlowSDN + - ./deploy/crdb.sh + - ./deploy/nats.sh + - ./deploy/kafka.sh + #- ./deploy/qdb.sh + - ./deploy/tfs.sh + - ./deploy/show.sh + + ## Wait for Context to be subscribed to NATS + ## WARNING: this loop is infinite if there is no subscriber (such as monitoring). + ## Investigate if we can use a counter to limit the number of iterations. + ## For now, keep it commented out. + #- LOOP_MAX_ATTEMPTS=180 + #- LOOP_COUNTER=0 + #- > + # while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do + # echo "Attempt: $LOOP_COUNTER" + # kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1; + # sleep 1; + # LOOP_COUNTER=$((LOOP_COUNTER + 1)) + # if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then + # echo "Max attempts reached, exiting the loop." 
+ # break + # fi + # done + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + + # Run end-to-end test: onboard scenario + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-onboarding.sh + + # Run end-to-end test: configure service TFS + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh + + # Dump configuration of the routers (after configure TFS service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: test connectivity with ping + - export TEST1_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) + - echo $TEST1_10 + - echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' + - export TEST1_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) + - echo $TEST1_1 + - echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' + - export TEST2_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) + - echo $TEST2_1 + - echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' + - export TEST2_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) + - echo $TEST2_10 + - echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' + - export TEST3_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) + - echo $TEST3_1 + - echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' + - export TEST3_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) + - echo $TEST3_10 + - echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' + + # Run end-to-end test: deconfigure service TFS + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh + + # Dump configuration of the routers (after deconfigure TFS service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: configure service IETF + - > + docker run -t --rm --name ${TEST_NAME} 
--network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-create.sh + + # Dump configuration of the routers (after configure IETF service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: test connectivity with ping + - export TEST1_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) + - echo $TEST1_10 + - echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' + - export TEST1_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) + - echo $TEST1_1 + - echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' + - export TEST2_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) + - echo $TEST2_1 + - echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' + - export TEST2_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) + - echo $TEST2_10 + - echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' + - export TEST3_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) + - echo $TEST3_1 + - echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' + - export TEST3_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) + - echo $TEST3_10 + - echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' + + # Run end-to-end test: deconfigure service IETF + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-remove.sh + + # Dump configuration of the routers (after deconfigure IETF service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: cleanup scenario + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-cleanup.sh + + after_script: + # Dump configuration of the routers (on after_script) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show 
running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Dump TeraFlowSDN component logs + - source src/tests/${TEST_NAME}/deploy_specs.sh + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/deviceservice -c server + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/pathcompservice -c frontend + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/serviceservice -c server + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/nbiservice -c server + + # Clean up + - RUNNER_PATH=`pwd` + #- cd $PWD/src/tests/${TEST_NAME} + - cd /tmp/clab/${TEST_NAME} + - containerlab destroy --topo ${TEST_NAME}.clab.yml --cleanup || true + - sudo rm -rf clab-${TEST_NAME}/ .${TEST_NAME}.clab.yml.bak || true + - cd $RUNNER_PATH + - kubectl delete namespaces tfs || true + - docker ps --all --quiet | xargs --no-run-if-empty docker stop + - docker container prune --force + - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force + - docker network prune --force + - docker volume prune --all --force + - docker image prune --force + + #coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + artifacts: + when: always + reports: + junit: ./src/tests/${TEST_NAME}/report_*.xml diff --git a/src/tests/l2_vpn_gnmi_oc/README.md b/src/tests/l2_vpn_gnmi_oc/README.md new file mode 100644 index 000000000..c05c16826 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/README.md @@ -0,0 +1,120 @@ +# L2 VPN test with gNMI/OpenConfig + +## Emulated DataPlane Deployment +- ContainerLab +- Scenario +- Descriptor + +## TeraFlowSDN Deployment +```bash +cd ~/tfs-ctrl +source ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/deploy_specs.sh +./deploy/all.sh +``` + +# ContainerLab - Arista cEOS - Commands + +## Download and install ContainerLab +```bash +sudo bash -c "$(curl -sL https://get.containerlab.dev)" -- -v 0.59.0 +``` + +## Download Arista cEOS image and create Docker image +```bash +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/ +docker import arista/cEOS64-lab-4.33.5M.tar ceos:4.33.5M +``` + +## Deploy scenario +```bash +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/ +sudo containerlab deploy --topo l2_vpn_gnmi_oc.clab.yml +``` + +## Inspect scenario +```bash +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/ +sudo containerlab inspect --topo l2_vpn_gnmi_oc.clab.yml +``` + +## Destroy scenario +```bash +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/ +sudo containerlab destroy --topo l2_vpn_gnmi_oc.clab.yml +sudo rm -rf clab-l2_vpn_gnmi_oc/ .l2_vpn_gnmi_oc.clab.yml.bak +``` + +## Access cEOS Bash/CLI +```bash +docker exec -it clab-l2_vpn_gnmi_oc-r1 bash +docker exec -it clab-l2_vpn_gnmi_oc-r2 bash +docker exec -it clab-l2_vpn_gnmi_oc-r1 Cli +docker exec -it clab-l2_vpn_gnmi_oc-r2 Cli +``` + +## Configure ContainerLab clients +```bash +docker exec -it clab-l2_vpn_gnmi_oc-dc1 bash + ip address add 172.16.1.10/24 dev eth1 + ip route add 172.16.2.0/24 via 172.16.1.1 + ping 172.16.2.10 + +docker exec -it clab-l2_vpn_gnmi_oc-dc2 bash + ip address add 172.16.2.10/24 dev eth1 + ip route add 172.16.1.0/24 via 172.16.2.1 + ping 172.16.1.10 +``` + +## Install gNMIc +```bash +sudo bash -c "$(curl -sL https://get-gnmic.kmrd.dev)" +``` + +## gNMI 
Capabilities request
+```bash
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure capabilities
+```
+
+## gNMI Get request
+```bash
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path / > r1.json
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path /interfaces/interface > r1-ifaces.json
+```
+
+## gNMI Set request
+```bash
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --update-path /system/config/hostname --update-value srl11
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path /system/config/hostname
+
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set \
+--update-path '/network-instances/network-instance[name=default]/vlans/vlan[vlan-id=200]/config/vlan-id' --update-value 200 \
+--update-path '/interfaces/interface[name=Ethernet10]/config/name' --update-value '"Ethernet10"' \
+--update-path '/interfaces/interface[name=Ethernet10]/ethernet/switched-vlan/config/interface-mode' --update-value '"ACCESS"' \
+--update-path '/interfaces/interface[name=Ethernet10]/ethernet/switched-vlan/config/access-vlan' --update-value 200 \
+--update-path '/interfaces/interface[name=Ethernet2]/config/name' --update-value '"Ethernet2"' \
+--update-path '/interfaces/interface[name=Ethernet2]/ethernet/switched-vlan/config/interface-mode' --update-value '"TRUNK"' \
+--update-path '/interfaces/interface[name=Ethernet2]/ethernet/switched-vlan/config/trunk-vlans' --update-value 200
+
+```
+
+## Subscribe request
+```bash
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf subscribe --path /interfaces/interface[name=Management0]/state/
+
+# In another terminal, you can generate traffic by opening an SSH connection
+ssh admin@clab-l2_vpn_gnmi_oc-r1
+```
+
+## Check configurations done
+```bash
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/' > r1-all.json
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/network-instances' > r1-nis.json
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/interfaces' > r1-ifs.json
+```
+
+## Delete elements
+```bash
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/network-instances/network-instance[name=b19229e8]'
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]'
+gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]'
+```
-- GitLab From 51d2678b6376c933711d797abb681303fc2ea784 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 14 Jan 2026 15:39:59 +0000 Subject: [PATCH 05/79] End-to-end test - EUCNC24: - Minor fix in GitLab CI descriptor --- src/tests/eucnc24/.gitlab-ci.yml | 66
++++++++++++++++---------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/src/tests/eucnc24/.gitlab-ci.yml b/src/tests/eucnc24/.gitlab-ci.yml index ee99ea271..a4fbac0ab 100644 --- a/src/tests/eucnc24/.gitlab-ci.yml +++ b/src/tests/eucnc24/.gitlab-ci.yml @@ -130,7 +130,7 @@ end2end_test eucnc24: - cp -R src/tests/${TEST_NAME}/clab/* /tmp/clab/${TEST_NAME} - tree -la /tmp/clab/${TEST_NAME} - cd /tmp/clab/${TEST_NAME} - - containerlab deploy --reconfigure --topo eucnc24.clab.yml + - containerlab deploy --reconfigure --topo ${TEST_NAME}.clab.yml - cd $RUNNER_PATH # Wait for initialization of Device NOSes @@ -138,9 +138,9 @@ end2end_test eucnc24: - docker ps -a # Dump configuration of the routers (before any configuration) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Configure TeraFlowSDN deployment # Uncomment if DEBUG log level is needed for the components @@ -198,27 +198,27 @@ end2end_test eucnc24: $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh # Dump configuration of the routers (after configure TFS service) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Run end-to-end test: test connectivity with ping - - export TEST1_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) + - export TEST1_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) - echo $TEST1_10 - echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST1_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) + - export TEST1_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) - echo $TEST1_1 - echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) + - export TEST2_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' 
--format json) - echo $TEST2_1 - echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) + - export TEST2_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) - echo $TEST2_10 - echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST3_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) + - export TEST3_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) - echo $TEST3_1 - echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' - - export TEST3_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) + - export TEST3_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) - echo $TEST3_10 - echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' @@ -230,9 +230,9 @@ end2end_test eucnc24: $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh # Dump configuration of the routers (after deconfigure TFS service) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Run end-to-end test: configure service IETF - > @@ -242,27 +242,27 @@ end2end_test eucnc24: $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-create.sh # Dump configuration of the routers (after configure IETF service) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Run end-to-end test: test connectivity with ping - - export TEST1_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) + - export TEST1_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) - echo $TEST1_10 - echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export 
TEST1_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) + - export TEST1_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) - echo $TEST1_1 - echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) + - export TEST2_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) - echo $TEST2_1 - echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) + - export TEST2_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) - echo $TEST2_10 - echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST3_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) + - export TEST3_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) - echo $TEST3_1 - echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' - - export TEST3_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) + - export TEST3_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) - echo $TEST3_10 - echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' @@ -274,9 +274,9 @@ end2end_test eucnc24: $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-remove.sh # Dump configuration of the routers (after deconfigure IETF service) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Run end-to-end test: cleanup scenario - > @@ -287,9 +287,9 @@ end2end_test eucnc24: after_script: # Dump configuration of the routers (on after_script) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label 
clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Dump TeraFlowSDN component logs - source src/tests/${TEST_NAME}/deploy_specs.sh @@ -303,8 +303,8 @@ end2end_test eucnc24: - RUNNER_PATH=`pwd` #- cd $PWD/src/tests/${TEST_NAME} - cd /tmp/clab/${TEST_NAME} - - containerlab destroy --topo eucnc24.clab.yml --cleanup || true - - sudo rm -rf clab-eucnc24/ .eucnc24.clab.yml.bak || true + - containerlab destroy --topo ${TEST_NAME}.clab.yml --cleanup || true + - sudo rm -rf clab-${TEST_NAME}/ .${TEST_NAME}.clab.yml.bak || true - cd $RUNNER_PATH - kubectl delete namespaces tfs || true - docker ps --all --quiet | xargs --no-run-if-empty docker stop -- GitLab From eb777774f40ec2e27635e297a50420361c034828 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 14 Jan 2026 15:42:55 +0000 Subject: [PATCH 06/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed CLab scenario --- .../clab/l2_vpn_gnmi_oc.clab.yml | 8 +++- src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg | 2 +- src/tests/l2_vpn_gnmi_oc/clab/r3-startup.cfg | 48 +++++++++++++++++++ 3 files changed, 56 insertions(+), 2 deletions(-) create mode 100644 src/tests/l2_vpn_gnmi_oc/clab/r3-startup.cfg diff --git a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml index 9c69e2b99..75cc2c90c 100644 --- a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml +++ b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml @@ -47,6 +47,11 @@ topology: mgmt-ipv4: 172.20.20.102 startup-config: r2-startup.cfg + r3: + kind: arista_ceos + mgmt-ipv4: 172.20.20.103 + startup-config: r3-startup.cfg + dc1: kind: linux mgmt-ipv4: 172.20.20.201 @@ -67,5 +72,6 @@ topology: links: - endpoints: ["r1:eth2", "r2:eth1"] + - endpoints: ["r2:eth3", "r3:eth2"] - endpoints: ["r1:eth10", "dc1:eth1"] - - endpoints: ["r2:eth10", "dc2:eth1"] + - endpoints: ["r3:eth10", "dc2:eth1"] diff --git a/src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg b/src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg index dbba5fbeb..6a1133703 100644 --- a/src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg +++ b/src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg @@ -29,7 +29,7 @@ management api netconf ! interface Ethernet1 ! -interface Ethernet10 +interface Ethernet3 ! interface Management0 ip address 172.20.20.102/24 diff --git a/src/tests/l2_vpn_gnmi_oc/clab/r3-startup.cfg b/src/tests/l2_vpn_gnmi_oc/clab/r3-startup.cfg new file mode 100644 index 000000000..946de6f77 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/r3-startup.cfg @@ -0,0 +1,48 @@ +! device: r3 (cEOSLab, EOS-4.34.4M) +! +no aaa root +! +username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70 +! +management api http-commands + no shutdown +! +no service interface inactive port-id allocation disabled +! +transceiver qsfp default-mode 4x10G +! +service routing protocols model multi-agent +! +hostname r3 +! +spanning-tree mode mstp +! +system l1 + unsupported speed action error + unsupported error-correction action error +! +management api gnmi + transport grpc default +! +management api netconf + transport ssh default +! +interface Ethernet2 +! +interface Ethernet10 +! +interface Management0 + ip address 172.20.20.103/24 +! +ip routing +! +ip route 0.0.0.0/0 172.20.20.1 +! +router multicast + ipv4 + software-forwarding kernel + ! + ipv6 + software-forwarding kernel +! 
+end -- GitLab From e929a740c757540b97eba35ee46a3cc5a8e0fd71 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 14 Jan 2026 18:33:25 +0000 Subject: [PATCH 07/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed TFS Topology --- .../l2_vpn_gnmi_oc/data/tfs-topology.json | 34 ++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json index 49df9de42..ac87af62d 100644 --- a/src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json @@ -49,6 +49,17 @@ "username": "admin", "password": "admin", "use_tls": false }}} ]} + }, + { + "device_id": {"device_uuid": {"uuid": "r3"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.103"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "use_tls": false + }}} + ]} } ], "links": [ @@ -67,6 +78,21 @@ ] }, + { + "link_id": {"link_uuid": {"uuid": "r2/Ethernet3==r3/Ethernet2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet3"}}, + {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "r3/Ethernet2==r2/Ethernet3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}, + {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet3"}} + ] + }, + { "link_id": {"link_uuid": {"uuid": "r1/Ethernet10==dc1/eth1"}}, "link_endpoint_ids": [ @@ -83,17 +109,17 @@ }, { - "link_id": {"link_uuid": {"uuid": "r2/Ethernet10==dc2/eth1"}}, + "link_id": {"link_uuid": {"uuid": "r3/Ethernet10==dc2/eth1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}, + {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}, {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "eth1"}} ] }, { - "link_id": {"link_uuid": {"uuid": "dc2/eth1==r2/Ethernet10"}}, + "link_id": {"link_uuid": {"uuid": "dc2/eth1==r3/Ethernet10"}}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "eth1"}}, - {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet10"}} + {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet10"}} ] } ] -- GitLab From c8d61c8ba09b3778f1f85be02314fef8d3fb293e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 14 Jan 2026 18:34:23 +0000 Subject: [PATCH 08/79] Service component - L2NM gNMI OpenConfig Service Handler: - Implemented first skeleton - Pending: add support for VLANs - Pending: remove static routes - Pending: implement config rule generation --- .../ConfigRuleComposer.py | 306 ++++++++++++++++++ .../L2NMGnmiOpenConfigServiceHandler.py | 185 +++++++++++ .../StaticRouteGenerator.py | 215 ++++++++++++ .../l2nm_gnmi_openconfig/__init__.py | 14 + 4 files changed, 720 insertions(+) create mode 100644 src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py create mode 100644 
src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py create mode 100644 src/service/service/service_handlers/l2nm_gnmi_openconfig/StaticRouteGenerator.py create mode 100644 src/service/service/service_handlers/l2nm_gnmi_openconfig/__init__.py diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py new file mode 100644 index 000000000..cf0eacab5 --- /dev/null +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py @@ -0,0 +1,306 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, netaddr, re +from typing import Dict, List, Optional, Set, Tuple +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import ConfigActionEnum, Device, EndPoint, Service +from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set +from service.service.service_handler_api.AnyTreeTools import TreeNode + +LOGGER = logging.getLogger(__name__) + +#NETWORK_INSTANCE = 'teraflowsdn' # TODO: investigate; sometimes it does not create/delete static rules properly +NETWORK_INSTANCE = 'default' +DEFAULT_NETWORK_INSTANCE = 'default' + +RE_IF = re.compile(r'^\/interface\[([^\]]+)\]$') +RE_SUBIF = re.compile(r'^\/interface\[([^\]]+)\]\/subinterface\[([^\]]+)\]$') +RE_SR = re.compile(r'^\/network_instance\[([^\]]+)\]\/protocols\[STATIC\]/route\[([^\:]+)\:([^\]]+)\]$') + +def _interface( + interface : str, if_type : Optional[str] = 'l3ipvlan', index : int = 0, vlan_id : Optional[int] = None, + address_ip : Optional[str] = None, address_prefix : Optional[int] = None, mtu : Optional[int] = None, + enabled : bool = True +) -> Tuple[str, Dict]: + path = '/interface[{:s}]/subinterface[{:d}]'.format(interface, index) + data = {'name': interface, 'type': if_type, 'index': index, 'enabled': enabled} + if if_type is not None: data['type'] = if_type + if vlan_id is not None: data['vlan_id'] = vlan_id + if address_ip is not None: data['address_ip'] = address_ip + if address_prefix is not None: data['address_prefix'] = address_prefix + if mtu is not None: data['mtu'] = mtu + return path, data + +def _network_instance(ni_name : str, ni_type : str) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]'.format(ni_name) + data = {'name': ni_name, 'type': ni_type} + return path, data + +def _network_instance_protocol(ni_name : str, protocol : str) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]/protocols[{:s}]'.format(ni_name, protocol) + data = {'name': ni_name, 'identifier': protocol, 'protocol_name': protocol} + return path, data + +def _network_instance_protocol_static(ni_name : str) -> Tuple[str, Dict]: + return _network_instance_protocol(ni_name, 'STATIC') + +def _network_instance_protocol_static_route( + ni_name : str, prefix : str, next_hop : str, metric : int +) -> Tuple[str, Dict]: + protocol 
= 'STATIC' + path = '/network_instance[{:s}]/protocols[{:s}]/static_route[{:s}:{:d}]'.format(ni_name, protocol, prefix, metric) + index = 'AUTO_{:d}_{:s}'.format(metric, next_hop.replace('.', '-')) + data = { + 'name': ni_name, 'identifier': protocol, 'protocol_name': protocol, + 'prefix': prefix, 'index': index, 'next_hop': next_hop, 'metric': metric + } + return path, data + +def _network_instance_interface(ni_name : str, interface : str, sub_interface_index : int) -> Tuple[str, Dict]: + sub_interface_name = '{:s}.{:d}'.format(interface, sub_interface_index) + path = '/network_instance[{:s}]/interface[{:s}]'.format(ni_name, sub_interface_name) + data = {'name': ni_name, 'id': sub_interface_name, 'interface': interface, 'subinterface': sub_interface_index} + return path, data + +class EndpointComposer: + def __init__(self, endpoint_uuid : str) -> None: + self.uuid = endpoint_uuid + self.objekt : Optional[EndPoint] = None + self.sub_interface_index = 0 + self.ipv4_address = None + self.ipv4_prefix_len = None + + def configure(self, endpoint_obj : Optional[EndPoint], settings : Optional[TreeNode]) -> None: + if endpoint_obj is not None: + self.objekt = endpoint_obj + if settings is None: return + json_settings : Dict = settings.value + + if 'address_ip' in json_settings: + self.ipv4_address = json_settings['address_ip'] + elif 'ip_address' in json_settings: + self.ipv4_address = json_settings['ip_address'] + else: + MSG = 'IP Address not found. Tried: address_ip and ip_address. endpoint_obj={:s} settings={:s}' + LOGGER.warning(MSG.format(str(endpoint_obj), str(settings))) + + if 'address_prefix' in json_settings: + self.ipv4_prefix_len = json_settings['address_prefix'] + elif 'prefix_length' in json_settings: + self.ipv4_prefix_len = json_settings['prefix_length'] + else: + MSG = 'IP Address Prefix not found. Tried: address_prefix and prefix_length. 
endpoint_obj={:s} settings={:s}' + LOGGER.warning(MSG.format(str(endpoint_obj), str(settings))) + + self.sub_interface_index = json_settings.get('index', 0) + + def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]: + if self.ipv4_address is None: return [] + if self.ipv4_prefix_len is None: return [] + json_config_rule = json_config_rule_delete if delete else json_config_rule_set + + config_rules : List[Dict] = list() + if network_instance_name != DEFAULT_NETWORK_INSTANCE: + config_rules.append(json_config_rule(*_network_instance_interface( + network_instance_name, self.objekt.name, self.sub_interface_index + ))) + + if delete: + config_rules.extend([ + json_config_rule(*_interface( + self.objekt.name, index=self.sub_interface_index, address_ip=None, + address_prefix=None, enabled=False + )), + ]) + else: + config_rules.extend([ + json_config_rule(*_interface( + self.objekt.name, index=self.sub_interface_index, address_ip=self.ipv4_address, + address_prefix=self.ipv4_prefix_len, enabled=True + )), + ]) + return config_rules + + def dump(self) -> Dict: + return { + 'index' : self.sub_interface_index, + 'address_ip' : self.ipv4_address, + 'address_prefix': self.ipv4_prefix_len, + } + + def __str__(self): + data = {'uuid': self.uuid} + if self.objekt is not None: data['name'] = self.objekt.name + data.update(self.dump()) + return json.dumps(data) + +class DeviceComposer: + def __init__(self, device_uuid : str) -> None: + self.uuid = device_uuid + self.objekt : Optional[Device] = None + self.aliases : Dict[str, str] = dict() # endpoint_name => endpoint_uuid + self.endpoints : Dict[str, EndpointComposer] = dict() # endpoint_uuid => EndpointComposer + self.connected : Set[str] = set() + self.static_routes : Dict[str, Dict[int, str]] = dict() # {prefix => {metric => next_hop}} + + def set_endpoint_alias(self, endpoint_name : str, endpoint_uuid : str) -> None: + self.aliases[endpoint_name] = endpoint_uuid + + def get_endpoint(self, endpoint_uuid : str) -> EndpointComposer: + endpoint_uuid = self.aliases.get(endpoint_uuid, endpoint_uuid) + if endpoint_uuid not in self.endpoints: + self.endpoints[endpoint_uuid] = EndpointComposer(endpoint_uuid) + return self.endpoints[endpoint_uuid] + + def configure(self, device_obj : Device, settings : Optional[TreeNode]) -> None: + self.objekt = device_obj + for endpoint_obj in device_obj.device_endpoints: + endpoint_uuid = endpoint_obj.endpoint_id.endpoint_uuid.uuid + self.set_endpoint_alias(endpoint_obj.name, endpoint_uuid) + self.get_endpoint(endpoint_obj.name).configure(endpoint_obj, None) + + # Find management interfaces + mgmt_ifaces = set() + for config_rule in device_obj.device_config.config_rules: + if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue + if config_rule.WhichOneof('config_rule') != 'custom': continue + config_rule_custom = config_rule.custom + match = RE_IF.match(config_rule_custom.resource_key) + if match is None: continue + if_name = match.groups()[0] + resource_value = json.loads(config_rule_custom.resource_value) + management = resource_value.get('management', False) + if management: mgmt_ifaces.add(if_name) + + # Find data plane interfaces + for config_rule in device_obj.device_config.config_rules: + if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue + if config_rule.WhichOneof('config_rule') != 'custom': continue + config_rule_custom = config_rule.custom + + match = RE_SUBIF.match(config_rule_custom.resource_key) + if match is not None: + if_name, subif_index = 
match.groups() + if if_name in mgmt_ifaces: continue + resource_value = json.loads(config_rule_custom.resource_value) + if 'address_ip' not in resource_value: continue + if 'address_prefix' not in resource_value: continue + ipv4_network = str(resource_value['address_ip']) + ipv4_prefix_len = int(resource_value['address_prefix']) + endpoint = self.get_endpoint(if_name) + endpoint.ipv4_address = ipv4_network + endpoint.ipv4_prefix_len = ipv4_prefix_len + endpoint.sub_interface_index = int(subif_index) + endpoint_ip_network = netaddr.IPNetwork('{:s}/{:d}'.format(ipv4_network, ipv4_prefix_len)) + if '0.0.0.0/' not in str(endpoint_ip_network.cidr): + self.connected.add(str(endpoint_ip_network.cidr)) + + match = RE_SR.match(config_rule_custom.resource_key) + if match is not None: + ni_name, prefix, metric = match.groups() + if ni_name != NETWORK_INSTANCE: continue + resource_value : Dict = json.loads(config_rule_custom.resource_value) + next_hop = resource_value['next_hop'] + self.static_routes.setdefault(prefix, dict())[metric] = next_hop + + if settings is None: return + json_settings : Dict = settings.value + static_routes : List[Dict] = json_settings.get('static_routes', []) + for static_route in static_routes: + prefix = static_route['prefix'] + next_hop = static_route['next_hop'] + metric = static_route.get('metric', 0) + self.static_routes.setdefault(prefix, dict())[metric] = next_hop + + def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]: + SELECTED_DEVICES = { + DeviceTypeEnum.PACKET_POP.value, + DeviceTypeEnum.PACKET_ROUTER.value, + DeviceTypeEnum.EMULATED_PACKET_ROUTER.value + } + if self.objekt.device_type not in SELECTED_DEVICES: return [] + + json_config_rule = json_config_rule_delete if delete else json_config_rule_set + config_rules : List[Dict] = list() + if network_instance_name != DEFAULT_NETWORK_INSTANCE: + json_config_rule(*_network_instance(network_instance_name, 'L3VRF')) + for endpoint in self.endpoints.values(): + config_rules.extend(endpoint.get_config_rules(network_instance_name, delete=delete)) + if len(self.static_routes) > 0: + config_rules.append( + json_config_rule(*_network_instance_protocol_static(network_instance_name)) + ) + for prefix, metric_next_hop in self.static_routes.items(): + for metric, next_hop in metric_next_hop.items(): + config_rules.append( + json_config_rule(*_network_instance_protocol_static_route( + network_instance_name, prefix, next_hop, metric + )) + ) + if delete: config_rules = list(reversed(config_rules)) + return config_rules + + def dump(self) -> Dict: + return { + 'endpoints' : { + endpoint_uuid : endpoint.dump() + for endpoint_uuid, endpoint in self.endpoints.items() + }, + 'connected' : list(self.connected), + 'static_routes' : self.static_routes, + } + + def __str__(self): + data = {'uuid': self.uuid} + if self.objekt is not None: data['name'] = self.objekt.name + data.update(self.dump()) + return json.dumps(data) + +class ConfigRuleComposer: + def __init__(self) -> None: + self.objekt : Optional[Service] = None + self.aliases : Dict[str, str] = dict() # device_name => device_uuid + self.devices : Dict[str, DeviceComposer] = dict() # device_uuid => DeviceComposer + + def set_device_alias(self, device_name : str, device_uuid : str) -> None: + self.aliases[device_name] = device_uuid + + def get_device(self, device_uuid : str) -> DeviceComposer: + device_uuid = self.aliases.get(device_uuid, device_uuid) + if device_uuid not in self.devices: + self.devices[device_uuid] = 
DeviceComposer(device_uuid) + return self.devices[device_uuid] + + def configure(self, service_obj : Service, settings : Optional[TreeNode]) -> None: + self.objekt = service_obj + if settings is None: return + #json_settings : Dict = settings.value + # For future use + + def get_config_rules( + self, network_instance_name : str = NETWORK_INSTANCE, delete : bool = False + ) -> Dict[str, List[Dict]]: + return { + device_uuid : device.get_config_rules(network_instance_name, delete=delete) + for device_uuid, device in self.devices.items() + } + + def dump(self) -> Dict: + return { + 'devices' : { + device_uuid : device.dump() + for device_uuid, device in self.devices.items() + } + } diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py new file mode 100644 index 000000000..125d4f759 --- /dev/null +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py @@ -0,0 +1,185 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import Any, Dict, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigRule, ConnectionId, DeviceId, Service +from common.tools.object_factory.Connection import json_connection_id +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.SettingsHandler import SettingsHandler +from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching +from service.service.task_scheduler.TaskExecutor import TaskExecutor +from service.service.tools.EndpointIdFormatters import endpointids_to_raw +from .ConfigRuleComposer import ConfigRuleComposer +from .StaticRouteGenerator import StaticRouteGenerator + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l2nm_gnmi_openconfig'}) + +class L2NMGnmiOpenConfigServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service : Service, task_executor : TaskExecutor, **settings + ) -> None: + self.__service = service + self.__task_executor = task_executor + self.__settings_handler = SettingsHandler(service.service_config, **settings) + self.__config_rule_composer = ConfigRuleComposer() + self.__static_route_generator = StaticRouteGenerator(self.__config_rule_composer) + self.__endpoint_map : Dict[Tuple[str, str], Tuple[str, str]] = dict() + + def _compose_config_rules(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> None: + if len(endpoints) % 2 != 0: raise Exception('Number of endpoints should be even') + + service_settings = 
self.__settings_handler.get_service_settings() + self.__config_rule_composer.configure(self.__service, service_settings) + + for endpoint in endpoints: + device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint) + + device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_settings = self.__settings_handler.get_device_settings(device_obj) + self.__config_rule_composer.set_device_alias(device_obj.name, device_uuid) + _device = self.__config_rule_composer.get_device(device_obj.name) + _device.configure(device_obj, device_settings) + + endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid) + endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj) + _device.set_endpoint_alias(endpoint_obj.name, endpoint_uuid) + _endpoint = _device.get_endpoint(endpoint_obj.name) + _endpoint.configure(endpoint_obj, endpoint_settings) + + self.__endpoint_map[(device_uuid, endpoint_uuid)] = (device_obj.name, endpoint_obj.name) + + LOGGER.debug('[pre] config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump()))) + self.__static_route_generator.compose(endpoints) + LOGGER.debug('[post] config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump()))) + + def _do_configurations( + self, config_rules_per_device : Dict[str, List[Dict]], endpoints : List[Tuple[str, str, Optional[str]]], + delete : bool = False + ) -> List[Union[bool, Exception]]: + # Configuration is done atomically on each device, all OK / all KO per device + results_per_device = dict() + for device_name,json_config_rules in config_rules_per_device.items(): + try: + device_obj = self.__config_rule_composer.get_device(device_name).objekt + if len(json_config_rules) == 0: continue + del device_obj.device_config.config_rules[:] + for json_config_rule in json_config_rules: + device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device_obj) + results_per_device[device_name] = True + except Exception as e: # pylint: disable=broad-exception-caught + verb = 'deconfigure' if delete else 'configure' + MSG = 'Unable to {:s} Device({:s}) : ConfigRules({:s})' + LOGGER.exception(MSG.format(verb, str(device_name), str(json_config_rules))) + results_per_device[device_name] = e + + results = [] + for endpoint in endpoints: + device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint) + device_name, _ = self.__endpoint_map[(device_uuid, endpoint_uuid)] + if device_name not in results_per_device: continue + results.append(results_per_device[device_name]) + return results + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + #service_uuid = self.__service.service_id.service_uuid.uuid + connection = self.__task_executor.get_connection(ConnectionId(**json_connection_id(connection_uuid))) + connection_endpoint_ids = endpointids_to_raw(connection.path_hops_endpoint_ids) + self._compose_config_rules(connection_endpoint_ids) + #network_instance_name = service_uuid.split('-')[0] + #config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=False) + config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=False) + LOGGER.debug('config_rules_per_device={:s}'.format(json.dumps(config_rules_per_device))) + 
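        # Illustrative shape only (device keys, interface names and values are hypothetical;
        # the real entries come from ConfigRuleComposer.get_config_rules() above):
        #   config_rules_per_device = {
        #       '<device-a>': [
        #           {'action': <CONFIGACTION_SET>,
        #            'custom': {'resource_key': '/interface[Ethernet1]/subinterface[0]',
        #                       'resource_value': '{"name": "Ethernet1", "index": 0, "enabled": true}'}},
        #       ],
        #       '<device-b>': [ ... ],
        #   }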
results = self._do_configurations(config_rules_per_device, endpoints, delete=False) + LOGGER.debug('results={:s}'.format(str(results))) + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + #service_uuid = self.__service.service_id.service_uuid.uuid + connection = self.__task_executor.get_connection(ConnectionId(**json_connection_id(connection_uuid))) + connection_endpoint_ids = endpointids_to_raw(connection.path_hops_endpoint_ids) + self._compose_config_rules(connection_endpoint_ids) + #network_instance_name = service_uuid.split('-')[0] + #config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=True) + config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=True) + LOGGER.debug('config_rules_per_device={:s}'.format(json.dumps(config_rules_per_device))) + results = self._do_configurations(config_rules_per_device, endpoints, delete=True) + LOGGER.debug('results={:s}'.format(str(results))) + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' 
+ LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + results = [] + for resource in resources: + try: + resource_value = json.loads(resource[1]) + self.__settings_handler.set(resource[0], resource_value) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource))) + results.append(e) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + results = [] + for resource in resources: + try: + self.__settings_handler.delete(resource[0]) + results.append(True) # record successful deletions so results stay aligned with resources, as in SetConfig + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource))) + results.append(e) + + return results diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/StaticRouteGenerator.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/StaticRouteGenerator.py new file mode 100644 index 000000000..0d0d2bf52 --- /dev/null +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/StaticRouteGenerator.py @@ -0,0 +1,215 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
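The generator that follows infers point-to-point addressing for adjacent ports when neither endpoint carries an address in its settings. A minimal sketch of that scheme, using the 10.254.254.0/16 pool declared below (the concrete /30 shown is illustrative only):

    import netaddr

    # one /30 subnet is reserved per inferred link; pop() returns an arbitrary one
    pool = set(netaddr.IPNetwork('10.254.254.0/16').subnet(30))
    link_net = pool.pop()                       # e.g. 10.254.0.4/30
    ip_a, ip_b = list(link_net.iter_hosts())    # the two usable host addresses
    prefix_len = 30                             # assigned to both ends of the link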
+ +import json, logging, netaddr, sys +from typing import List, Optional, Tuple +from .ConfigRuleComposer import ConfigRuleComposer + +LOGGER = logging.getLogger(__name__) + +# Used to infer routing networks for adjacent ports when there is no hint in device/endpoint settings +ROOT_NEIGHBOR_ROUTING_NETWORK = netaddr.IPNetwork('10.254.254.0/16') +NEIGHBOR_ROUTING_NETWORKS_PREFIX_LEN = 30 +NEIGHBOR_ROUTING_NETWORKS = set(ROOT_NEIGHBOR_ROUTING_NETWORK.subnet(NEIGHBOR_ROUTING_NETWORKS_PREFIX_LEN)) + +def _generate_neighbor_addresses() -> Tuple[netaddr.IPAddress, netaddr.IPAddress, int]: + ip_network = NEIGHBOR_ROUTING_NETWORKS.pop() + ip_addresses = list(ip_network.iter_hosts()) + ip_addresses.append(NEIGHBOR_ROUTING_NETWORKS_PREFIX_LEN) + return ip_addresses + +def _compute_gateway(ip_network : netaddr.IPNetwork, gateway_host=1) -> netaddr.IPAddress: + return netaddr.IPAddress(ip_network.cidr.first + gateway_host) + +def _compose_ipv4_network(ipv4_network, ipv4_prefix_len) -> netaddr.IPNetwork: + return netaddr.IPNetwork('{:s}/{:d}'.format(str(ipv4_network), int(ipv4_prefix_len))) + +class StaticRouteGenerator: + def __init__(self, config_rule_composer : ConfigRuleComposer) -> None: + self._config_rule_composer = config_rule_composer + + def compose(self, connection_hop_list : List[Tuple[str, str, Optional[str]]]) -> None: + link_endpoints = self._compute_link_endpoints(connection_hop_list) + LOGGER.debug('link_endpoints = {:s}'.format(str(link_endpoints))) + + self._compute_link_addresses(link_endpoints) + LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump()))) + + self._discover_connected_networks(connection_hop_list) + LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump()))) + + # Compute and propagate static routes forward (service_endpoint_a => service_endpoint_b) + self._compute_static_routes(link_endpoints) + + # Compute and propagate static routes backward (service_endpoint_b => service_endpoint_a) + reversed_endpoints = list(reversed(connection_hop_list)) + reversed_link_endpoints = self._compute_link_endpoints(reversed_endpoints) + LOGGER.debug('reversed_link_endpoints = {:s}'.format(str(reversed_link_endpoints))) + self._compute_static_routes(reversed_link_endpoints) + + LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump()))) + + def _compute_link_endpoints( + self, connection_hop_list : List[Tuple[str, str, Optional[str]]] + ) -> List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]]: + # In some cases connection_hop_list might contain repeated endpoints, remove them here. + added_connection_hops = set() + filtered_connection_hop_list = list() + for connection_hop in connection_hop_list: + if connection_hop in added_connection_hops: continue + filtered_connection_hop_list.append(connection_hop) + added_connection_hops.add(connection_hop) + connection_hop_list = filtered_connection_hop_list + + # In some cases connection_hop_list first and last items might be internal endpoints of + # devices instead of link endpoints. Filter those endpoints not reaching a new device. 
+ if len(connection_hop_list) > 2 and connection_hop_list[0][0] == connection_hop_list[1][0]: + # same device on first 2 endpoints + connection_hop_list = connection_hop_list[1:] + if len(connection_hop_list) > 2 and connection_hop_list[-1][0] == connection_hop_list[-2][0]: + # same device on last 2 endpoints + connection_hop_list = connection_hop_list[:-1] + + num_connection_hops = len(connection_hop_list) + if num_connection_hops % 2 != 0: raise Exception('Number of connection hops must be even') + if num_connection_hops < 4: raise Exception('Number of connection hops must be >= 4') + + it_connection_hops = iter(connection_hop_list) + return list(zip(it_connection_hops, it_connection_hops)) + + def _compute_link_addresses( + self, link_endpoints_list : List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]] + ) -> None: + for link_endpoints in link_endpoints_list: + device_endpoint_a, device_endpoint_b = link_endpoints + + device_uuid_a, endpoint_uuid_a = device_endpoint_a[0:2] + endpoint_a = self._config_rule_composer.get_device(device_uuid_a).get_endpoint(endpoint_uuid_a) + + device_uuid_b, endpoint_uuid_b = device_endpoint_b[0:2] + endpoint_b = self._config_rule_composer.get_device(device_uuid_b).get_endpoint(endpoint_uuid_b) + + if endpoint_a.ipv4_address is None and endpoint_b.ipv4_address is None: + ip_endpoint_a, ip_endpoint_b, prefix_len = _generate_neighbor_addresses() + endpoint_a.ipv4_address = str(ip_endpoint_a) + endpoint_a.ipv4_prefix_len = prefix_len + endpoint_b.ipv4_address = str(ip_endpoint_b) + endpoint_b.ipv4_prefix_len = prefix_len + elif endpoint_a.ipv4_address is not None and endpoint_b.ipv4_address is None: + prefix_len = endpoint_a.ipv4_prefix_len + ip_network_a = _compose_ipv4_network(endpoint_a.ipv4_address, prefix_len) + if prefix_len > 30: + MSG = 'Unsupported prefix_len for {:s}: {:s}' + raise Exception(MSG.format(str(endpoint_a), str(prefix_len))) + ip_endpoint_b = _compute_gateway(ip_network_a, gateway_host=1) + if ip_endpoint_b == ip_network_a.ip: + ip_endpoint_b = _compute_gateway(ip_network_a, gateway_host=2) + endpoint_b.ipv4_address = str(ip_endpoint_b) + endpoint_b.ipv4_prefix_len = prefix_len + elif endpoint_a.ipv4_address is None and endpoint_b.ipv4_address is not None: + prefix_len = endpoint_b.ipv4_prefix_len + ip_network_b = _compose_ipv4_network(endpoint_b.ipv4_address, prefix_len) + if prefix_len > 30: + MSG = 'Unsupported prefix_len for {:s}: {:s}' + raise Exception(MSG.format(str(endpoint_b), str(prefix_len))) + ip_endpoint_a = _compute_gateway(ip_network_b, gateway_host=1) + if ip_endpoint_a == ip_network_b.ip: + ip_endpoint_a = _compute_gateway(ip_network_b, gateway_host=2) + endpoint_a.ipv4_address = str(ip_endpoint_a) + endpoint_a.ipv4_prefix_len = prefix_len + elif endpoint_a.ipv4_address is not None and endpoint_b.ipv4_address is not None: + ip_network_a = _compose_ipv4_network(endpoint_a.ipv4_address, endpoint_a.ipv4_prefix_len) + ip_network_b = _compose_ipv4_network(endpoint_b.ipv4_address, endpoint_b.ipv4_prefix_len) + if ip_network_a.cidr != ip_network_b.cidr: + MSG = 'Incompatible CIDRs: endpoint_a({:s})=>{:s} endpoint_b({:s})=>{:s}' + raise Exception(MSG.format(str(endpoint_a), str(ip_network_a), str(endpoint_b), str(ip_network_b))) + if ip_network_a.ip == ip_network_b.ip: + MSG = 'Duplicated IP: endpoint_a({:s})=>{:s} endpoint_b({:s})=>{:s}' + raise Exception(MSG.format(str(endpoint_a), str(ip_network_a), str(endpoint_b), str(ip_network_b))) + + def _discover_connected_networks(self, connection_hop_list : 
List[Tuple[str, str, Optional[str]]]) -> None: + for connection_hop in connection_hop_list: + device_uuid, endpoint_uuid = connection_hop[0:2] + device = self._config_rule_composer.get_device(device_uuid) + endpoint = device.get_endpoint(endpoint_uuid) + + if endpoint.ipv4_address is None: continue + ip_network = _compose_ipv4_network(endpoint.ipv4_address, endpoint.ipv4_prefix_len) + + if '0.0.0.0/' in str(ip_network.cidr): continue + device.connected.add(str(ip_network.cidr)) + + def _compute_static_routes( + self, link_endpoints_list : List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]] + ) -> None: + for link_endpoints in link_endpoints_list: + device_endpoint_a, device_endpoint_b = link_endpoints + + device_uuid_a, endpoint_uuid_a = device_endpoint_a[0:2] + device_a = self._config_rule_composer.get_device(device_uuid_a) + endpoint_a = device_a.get_endpoint(endpoint_uuid_a) + + device_uuid_b, endpoint_uuid_b = device_endpoint_b[0:2] + device_b = self._config_rule_composer.get_device(device_uuid_b) + endpoint_b = device_b.get_endpoint(endpoint_uuid_b) + + # Compute static routes from networks connected in device_a + for ip_network_a in device_a.connected: + if ip_network_a in device_b.connected: continue + if ip_network_a in device_b.static_routes: continue + if ip_network_a in ROOT_NEIGHBOR_ROUTING_NETWORK: continue + endpoint_a_ip_network = _compose_ipv4_network(endpoint_a.ipv4_address, endpoint_a.ipv4_prefix_len) + next_hop = str(endpoint_a_ip_network.ip) + metric = 1 + device_b.static_routes.setdefault(ip_network_a, dict())[metric] = next_hop + + # Compute static routes from networks connected in device_b + for ip_network_b in device_b.connected: + if ip_network_b in device_a.connected: continue + if ip_network_b in device_a.static_routes: continue + if ip_network_b in ROOT_NEIGHBOR_ROUTING_NETWORK: continue + endpoint_b_ip_network = _compose_ipv4_network(endpoint_b.ipv4_address, endpoint_b.ipv4_prefix_len) + next_hop = str(endpoint_b_ip_network.ip) + metric = 1 + device_a.static_routes.setdefault(ip_network_b, dict())[metric] = next_hop + + # Propagate static routes from networks connected in device_a + for ip_network_a, metric_next_hop in device_a.static_routes.items(): + if ip_network_a in device_b.connected: continue + if ip_network_a in ROOT_NEIGHBOR_ROUTING_NETWORK: continue + endpoint_a_ip_network = _compose_ipv4_network(endpoint_a.ipv4_address, endpoint_a.ipv4_prefix_len) + if ip_network_a in device_b.static_routes: + current_metric = min(device_b.static_routes[ip_network_a].keys()) + else: + current_metric = int(sys.float_info.max) + for metric, next_hop in metric_next_hop.items(): + new_metric = metric + 1 + if new_metric >= current_metric: continue + next_hop_a = str(endpoint_a_ip_network.ip) + device_b.static_routes.setdefault(ip_network_a, dict())[metric] = next_hop_a + + # Propagate static routes from networks connected in device_b + for ip_network_b in device_b.static_routes.keys(): + if ip_network_b in device_a.connected: continue + if ip_network_b in ROOT_NEIGHBOR_ROUTING_NETWORK: continue + endpoint_b_ip_network = _compose_ipv4_network(endpoint_b.ipv4_address, endpoint_b.ipv4_prefix_len) + if ip_network_b in device_a.static_routes: + current_metric = min(device_a.static_routes[ip_network_b].keys()) + else: + current_metric = int(sys.float_info.max) + for metric, next_hop in metric_next_hop.items(): + new_metric = metric + 1 + if new_metric >= current_metric: continue + next_hop_b = str(endpoint_b_ip_network.ip) + 
device_a.static_routes.setdefault(ip_network_b, dict())[metric] = next_hop_b diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/__init__.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + -- GitLab From 65778c4248892761fb3d28821df5e4817c1ccb2f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 16 Jan 2026 12:04:45 +0000 Subject: [PATCH 09/79] Device component: - Fixed typehinting and factorized DriverFactory and related Exceptions --- .../service/driver_api/DriverFactory.py | 24 ++++++++++++------- src/device/service/driver_api/Exceptions.py | 22 +++++++++-------- 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/src/device/service/driver_api/DriverFactory.py b/src/device/service/driver_api/DriverFactory.py index 38ae0ac56..fefa3cd91 100644 --- a/src/device/service/driver_api/DriverFactory.py +++ b/src/device/service/driver_api/DriverFactory.py @@ -14,8 +14,7 @@ import logging from enum import Enum -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type -from ._Driver import _Driver +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Type from .Exceptions import ( AmbiguousFilterException, EmptyFilterFieldException, UnsatisfiedFilterException, UnsupportedDriverClassException, @@ -23,12 +22,20 @@ from .Exceptions import ( ) from .FilterFields import FILTER_FIELD_ALLOWED_VALUES, FilterFieldEnum +if TYPE_CHECKING: + from ._Driver import _Driver + LOGGER = logging.getLogger(__name__) SUPPORTED_FILTER_FIELDS = set(FILTER_FIELD_ALLOWED_VALUES.keys()) +def check_is_class_valid(driver_class : Type['_Driver']) -> None: + from ._Driver import _Driver + if not issubclass(driver_class, _Driver): + raise UnsupportedDriverClassException(str(driver_class)) + def sanitize_filter_fields( filter_fields : Dict[FilterFieldEnum, Any], driver_name : Optional[str] = None ) -> Dict[FilterFieldEnum, Any]: @@ -67,14 +74,13 @@ def sanitize_filter_fields( class DriverFactory: def __init__( - self, drivers : List[Tuple[Type[_Driver], List[Dict[FilterFieldEnum, Any]]]] + self, drivers : List[Tuple[Type['_Driver'], List[Dict[FilterFieldEnum, Any]]]] ) -> None: - self.__drivers : List[Tuple[Type[_Driver], Dict[FilterFieldEnum, Any]]] = list() + self.__drivers : List[Tuple[Type['_Driver'], Dict[FilterFieldEnum, Any]]] = list() for driver_class,filter_field_sets in drivers: - #if not issubclass(driver_class, _Driver): - # raise UnsupportedDriverClassException(str(driver_class)) - driver_name = driver_class #.__name__ + check_is_class_valid(driver_class) + driver_name = driver_class.__name__ for filter_fields in filter_field_sets: filter_fields = {k.value:v for k,v in filter_fields.items()} @@ -86,7 +92,7 @@ class DriverFactory: def 
is_driver_compatible( self, driver_filter_fields : Dict[FilterFieldEnum, Any], - selection_filter_fields : Dict[FilterFieldEnum, Any] + selection_filter_fields : Dict[FilterFieldEnum, Any] ) -> bool: # by construction empty driver_filter_fields are not allowed # by construction empty selection_filter_fields are not allowed @@ -102,7 +108,7 @@ class DriverFactory: return True - def get_driver_class(self, **selection_filter_fields) -> _Driver: + def get_driver_class(self, **selection_filter_fields) -> '_Driver': sanitized_filter_fields = sanitize_filter_fields(selection_filter_fields) compatible_drivers : List[Tuple[Type[_Driver], Dict[FilterFieldEnum, Any]]] = [ diff --git a/src/device/service/driver_api/Exceptions.py b/src/device/service/driver_api/Exceptions.py index 8f33ebc57..86c2afcef 100644 --- a/src/device/service/driver_api/Exceptions.py +++ b/src/device/service/driver_api/Exceptions.py @@ -13,22 +13,22 @@ # limitations under the License. class UnsatisfiedFilterException(Exception): - def __init__(self, filter_fields): + def __init__(self, filter_fields) -> None: msg = 'No Driver satisfies FilterFields({:s})' super().__init__(msg.format(str(filter_fields))) class AmbiguousFilterException(Exception): - def __init__(self, filter_fields, compatible_drivers): + def __init__(self, filter_fields, compatible_drivers) -> None: msg = 'Multiple Drivers satisfy FilterFields({:s}): {:s}' super().__init__(msg.format(str(filter_fields), str(compatible_drivers))) class UnsupportedDriverClassException(Exception): - def __init__(self, driver_class_name): + def __init__(self, driver_class_name) -> None: msg = 'Class({:s}) is not a subclass of _Driver' super().__init__(msg.format(str(driver_class_name))) class EmptyFilterFieldException(Exception): - def __init__(self, filter_fields, driver_class_name=None): + def __init__(self, filter_fields, driver_class_name=None) -> None: if driver_class_name: msg = 'Empty FilterField({:s}) specified by Driver({:s}) is not supported' msg = msg.format(str(filter_fields), str(driver_class_name)) @@ -38,7 +38,7 @@ class EmptyFilterFieldException(Exception): super().__init__(msg) class UnsupportedFilterFieldException(Exception): - def __init__(self, unsupported_filter_fields, driver_class_name=None): + def __init__(self, unsupported_filter_fields, driver_class_name=None) -> None: if driver_class_name: msg = 'FilterFields({:s}) specified by Driver({:s}) are not supported' msg = msg.format(str(unsupported_filter_fields), str(driver_class_name)) @@ -48,7 +48,9 @@ class UnsupportedFilterFieldException(Exception): super().__init__(msg) class UnsupportedFilterFieldValueException(Exception): - def __init__(self, filter_field_name, filter_field_value, allowed_filter_field_values, driver_class_name=None): + def __init__( + self, filter_field_name, filter_field_value, allowed_filter_field_values, driver_class_name=None + ) -> None: if driver_class_name: msg = 'FilterField({:s}={:s}) specified by Driver({:s}) is not supported. Allowed values are {:s}' msg = msg.format( @@ -60,24 +62,24 @@ class UnsupportedFilterFieldValueException(Exception): super().__init__(msg) class DriverInstanceCacheTerminatedException(Exception): - def __init__(self): + def __init__(self) -> None: msg = 'DriverInstanceCache is terminated. No new instances can be processed.' 
super().__init__(msg) class UnsupportedResourceKeyException(Exception): - def __init__(self, resource_key): + def __init__(self, resource_key) -> None: msg = 'ResourceKey({:s}) not supported' msg = msg.format(str(resource_key)) super().__init__(msg) class ConfigFieldNotFoundException(Exception): - def __init__(self, config_field_name): + def __init__(self, config_field_name) -> None: msg = 'ConfigField({:s}) not specified in resource' msg = msg.format(str(config_field_name)) super().__init__(msg) class ConfigFieldsNotSupportedException(Exception): - def __init__(self, config_fields): + def __init__(self, config_fields) -> None: msg = 'ConfigFields({:s}) not supported in resource' msg = msg.format(str(config_fields)) super().__init__(msg) -- GitLab From 6c8ae3bab28f2e2d5b67babe3143c6c371d2750d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 16 Jan 2026 12:07:13 +0000 Subject: [PATCH 10/79] Service component: - Upgraded Service Handler Factory (aligned to device driver factory) to prevent selection of partial-fit service handlers resulting in wrong service handler selection --- .../service/service_handler_api/Exceptions.py | 36 ++-- .../service_handler_api/FilterFields.py | 62 ++----- .../ServiceHandlerFactory.py | 173 ++++++++++-------- 3 files changed, 142 insertions(+), 129 deletions(-) diff --git a/src/service/service/service_handler_api/Exceptions.py b/src/service/service/service_handler_api/Exceptions.py index 7a10ff334..bc6ac4c8f 100644 --- a/src/service/service/service_handler_api/Exceptions.py +++ b/src/service/service/service_handler_api/Exceptions.py @@ -13,17 +13,32 @@ # limitations under the License. class UnsatisfiedFilterException(Exception): - def __init__(self, filter_fields): + def __init__(self, filter_fields) -> None: msg = 'No ServiceHandler satisfies FilterFields({:s})' super().__init__(msg.format(str(filter_fields))) +class AmbiguousFilterException(Exception): + def __init__(self, filter_fields, compatible_service_handlers) -> None: + msg = 'Multiple Service Handlers satisfy FilterFields({:s}): {:s}' + super().__init__(msg.format(str(filter_fields), str(compatible_service_handlers))) + class UnsupportedServiceHandlerClassException(Exception): - def __init__(self, service_handler_class_name): + def __init__(self, service_handler_class_name) -> None: msg = 'Class({:s}) is not a subclass of _ServiceHandler' super().__init__(msg.format(str(service_handler_class_name))) +class EmptyFilterFieldException(Exception): + def __init__(self, filter_fields, service_handler_class_name=None) -> None: + if service_handler_class_name: + msg = 'Empty FilterField({:s}) specified by ServiceHandler({:s}) is not supported' + msg = msg.format(str(filter_fields), str(service_handler_class_name)) + else: + msg = 'Empty FilterField({:s}) is not supported' + msg = msg.format(str(filter_fields)) + super().__init__(msg) + class UnsupportedFilterFieldException(Exception): - def __init__(self, unsupported_filter_fields, service_handler_class_name=None): + def __init__(self, unsupported_filter_fields, service_handler_class_name=None) -> None: if service_handler_class_name: msg = 'FilterFields({:s}) specified by ServiceHandler({:s}) are not supported' msg = msg.format(str(unsupported_filter_fields), str(service_handler_class_name)) @@ -34,8 +49,8 @@ class UnsupportedFilterFieldException(Exception): class UnsupportedFilterFieldValueException(Exception): def __init__( - self, filter_field_name, filter_field_value, allowed_filter_field_values, service_handler_class_name=None): - + self, 
filter_field_name, filter_field_value, allowed_filter_field_values, service_handler_class_name=None + ) -> None: if service_handler_class_name: msg = 'FilterField({:s}={:s}) specified by ServiceHandler({:s}) is not supported. Allowed values are {:s}' msg = msg.format( @@ -47,20 +62,19 @@ class UnsupportedFilterFieldValueException(Exception): super().__init__(msg) #class UnsupportedResourceKeyException(Exception): -# def __init__(self, resource_key): +# def __init__(self, resource_key) -> None: # msg = 'ResourceKey({:s}) not supported' # msg = msg.format(str(resource_key)) # super().__init__(msg) -# + #class ConfigFieldNotFoundException(Exception): -# def __init__(self, config_field_name): +# def __init__(self, config_field_name) -> None: # msg = 'ConfigField({:s}) not specified in resource' # msg = msg.format(str(config_field_name)) # super().__init__(msg) -# + #class ConfigFieldsNotSupportedException(Exception): -# def __init__(self, config_fields): +# def __init__(self, config_fields) -> None: # msg = 'ConfigFields({:s}) not supported in resource' # msg = msg.format(str(config_fields)) # super().__init__(msg) -# \ No newline at end of file diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py index 473efa3e0..b0a5666a6 100644 --- a/src/service/service/service_handler_api/FilterFields.py +++ b/src/service/service/service_handler_api/FilterFields.py @@ -13,56 +13,26 @@ # limitations under the License. from enum import Enum -from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum +from typing import Any, Dict, Optional +from common.proto.context_pb2 import Device, DeviceDriverEnum, Service, ServiceTypeEnum class FilterFieldEnum(Enum): SERVICE_TYPE = 'service_type' DEVICE_DRIVER = 'device_driver' -SERVICE_TYPE_VALUES = { - ServiceTypeEnum.SERVICETYPE_UNKNOWN, - ServiceTypeEnum.SERVICETYPE_L3NM, - ServiceTypeEnum.SERVICETYPE_L2NM, - ServiceTypeEnum.SERVICETYPE_L1NM, - ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, - ServiceTypeEnum.SERVICETYPE_TE, - ServiceTypeEnum.SERVICETYPE_E2E, - ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, - ServiceTypeEnum.SERVICETYPE_QKD, - ServiceTypeEnum.SERVICETYPE_INT, - ServiceTypeEnum.SERVICETYPE_ACL, - ServiceTypeEnum.SERVICETYPE_IP_LINK, - ServiceTypeEnum.SERVICETYPE_IPOWDM, - ServiceTypeEnum.SERVICETYPE_TAPI_LSP, -} - -DEVICE_DRIVER_VALUES = { - DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, - DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG, - DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API, - DeviceDriverEnum.DEVICEDRIVER_P4, - DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, - DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532, - DeviceDriverEnum.DEVICEDRIVER_XR, - DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN, - DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG, - DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS, - DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN, - DeviceDriverEnum.DEVICEDRIVER_OC, - DeviceDriverEnum.DEVICEDRIVER_QKD, - DeviceDriverEnum.DEVICEDRIVER_IETF_L3VPN, - DeviceDriverEnum.DEVICEDRIVER_IETF_SLICE, - DeviceDriverEnum.DEVICEDRIVER_NCE, - DeviceDriverEnum.DEVICEDRIVER_SMARTNIC, - DeviceDriverEnum.DEVICEDRIVER_MORPHEUS, - DeviceDriverEnum.DEVICEDRIVER_RYU, - DeviceDriverEnum.DEVICEDRIVER_GNMI_NOKIA_SRLINUX, - DeviceDriverEnum.DEVICEDRIVER_OPENROADM, - DeviceDriverEnum.DEVICEDRIVER_RESTCONF_OPENCONFIG, -} - -# Map allowed filter fields to allowed values per Filter field. If no restriction (free text) None is specified +# Map allowed filter fields to allowed values per Filter field. 
+# If no restriction (free text) None is specified FILTER_FIELD_ALLOWED_VALUES = { - FilterFieldEnum.SERVICE_TYPE.value : SERVICE_TYPE_VALUES, - FilterFieldEnum.DEVICE_DRIVER.value : DEVICE_DRIVER_VALUES, + FilterFieldEnum.SERVICE_TYPE.value : set(ServiceTypeEnum.values()), + FilterFieldEnum.DEVICE_DRIVER.value : set(DeviceDriverEnum.values()), } + +def get_service_handler_filter_fields( + service : Optional[Service], device : Optional[Device] +) -> Dict[FilterFieldEnum, Any]: + if service is None: return {} + if device is None: return {} + return { + FilterFieldEnum.SERVICE_TYPE : service.service_type, + FilterFieldEnum.DEVICE_DRIVER : [driver for driver in device.device_drivers], + } diff --git a/src/service/service/service_handler_api/ServiceHandlerFactory.py b/src/service/service/service_handler_api/ServiceHandlerFactory.py index f998fe072..efd636ddd 100644 --- a/src/service/service/service_handler_api/ServiceHandlerFactory.py +++ b/src/service/service/service_handler_api/ServiceHandlerFactory.py @@ -12,93 +12,122 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, operator +import logging from enum import Enum -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Type from common.proto.context_pb2 import Device, DeviceDriverEnum, Service from common.tools.grpc.Tools import grpc_message_to_json_string from .Exceptions import ( - UnsatisfiedFilterException, UnsupportedServiceHandlerClassException, UnsupportedFilterFieldException, - UnsupportedFilterFieldValueException) + AmbiguousFilterException, EmptyFilterFieldException, + UnsatisfiedFilterException, UnsupportedServiceHandlerClassException, + UnsupportedFilterFieldException, UnsupportedFilterFieldValueException +) from .FilterFields import FILTER_FIELD_ALLOWED_VALUES, FilterFieldEnum if TYPE_CHECKING: - from service.service.service_handler_api._ServiceHandler import _ServiceHandler + from ._ServiceHandler import _ServiceHandler + LOGGER = logging.getLogger(__name__) +SUPPORTED_FILTER_FIELDS = set(FILTER_FIELD_ALLOWED_VALUES.keys()) + + +def check_is_class_valid(service_handler_class : Type['_ServiceHandler']) -> None: + from ._ServiceHandler import _ServiceHandler + if not issubclass(service_handler_class, _ServiceHandler): + raise UnsupportedServiceHandlerClassException(str(service_handler_class)) + +def sanitize_filter_fields( + filter_fields : Dict[FilterFieldEnum, Any], service_handler_name : Optional[str] = None +) -> Dict[FilterFieldEnum, Any]: + if len(filter_fields) == 0: + raise EmptyFilterFieldException( + filter_fields, service_handler_class_name=service_handler_name + ) + + unsupported_filter_fields = set(filter_fields.keys()).difference(SUPPORTED_FILTER_FIELDS) + if len(unsupported_filter_fields) > 0: + raise UnsupportedFilterFieldException( + unsupported_filter_fields, service_handler_class_name=service_handler_name + ) + + sanitized_filter_fields : Dict[FilterFieldEnum, Set[Any]] = dict() + for field_name, field_values in filter_fields.items(): + field_enum_values = FILTER_FIELD_ALLOWED_VALUES.get(field_name) + if not isinstance(field_values, Iterable) or isinstance(field_values, str): + field_values = [field_values] + + sanitized_field_values : Set[Any] = set() + for field_value in field_values: + if isinstance(field_value, Enum): field_value = field_value.value + if field_enum_values is not None and field_value not in field_enum_values: 
+ raise UnsupportedFilterFieldValueException( + field_name, field_value, field_enum_values, + service_handler_class_name=service_handler_name + ) + sanitized_field_values.add(field_value) + + if len(sanitized_field_values) == 0: continue # do not add empty filters + sanitized_filter_fields[field_name] = sanitized_field_values + + return sanitized_filter_fields + + class ServiceHandlerFactory: - def __init__(self, service_handlers : List[Tuple[type, List[Dict[FilterFieldEnum, Any]]]]) -> None: - # Dict{field_name => Dict{field_value => Set{ServiceHandler}}} - self.__indices : Dict[str, Dict[str, Set['_ServiceHandler']]] = {} + def __init__( + self, service_handlers : List[Tuple[Type['_ServiceHandler'], List[Dict[FilterFieldEnum, Any]]]] + ) -> None: + self.__service_handlers : List[Tuple[Type['_ServiceHandler'], Dict[FilterFieldEnum, Any]]] = list() for service_handler_class,filter_field_sets in service_handlers: + check_is_class_valid(service_handler_class) + service_handler_name = service_handler_class.__name__ + for filter_fields in filter_field_sets: filter_fields = {k.value:v for k,v in filter_fields.items()} - self.register_service_handler_class(service_handler_class, **filter_fields) - - def register_service_handler_class(self, service_handler_class, **filter_fields): - from service.service.service_handler_api._ServiceHandler import _ServiceHandler - if not issubclass(service_handler_class, _ServiceHandler): - raise UnsupportedServiceHandlerClassException(str(service_handler_class)) - - service_handler_name = service_handler_class.__name__ - supported_filter_fields = set(FILTER_FIELD_ALLOWED_VALUES.keys()) - unsupported_filter_fields = set(filter_fields.keys()).difference(supported_filter_fields) - if len(unsupported_filter_fields) > 0: - raise UnsupportedFilterFieldException( - unsupported_filter_fields, service_handler_class_name=service_handler_name) - - for field_name, field_values in filter_fields.items(): - field_indice = self.__indices.setdefault(field_name, dict()) - field_enum_values = FILTER_FIELD_ALLOWED_VALUES.get(field_name) - if not isinstance(field_values, Iterable) or isinstance(field_values, str): - field_values = [field_values] - for field_value in field_values: - if isinstance(field_value, Enum): field_value = field_value.value - if field_enum_values is not None and field_value not in field_enum_values: - raise UnsupportedFilterFieldValueException( - field_name, field_value, field_enum_values, service_handler_class_name=service_handler_name) - field_indice_service_handlers = field_indice.setdefault(field_value, set()) - field_indice_service_handlers.add(service_handler_class) - - def get_service_handler_class(self, **filter_fields) -> '_ServiceHandler': - supported_filter_fields = set(FILTER_FIELD_ALLOWED_VALUES.keys()) - unsupported_filter_fields = set(filter_fields.keys()).difference(supported_filter_fields) - if len(unsupported_filter_fields) > 0: raise UnsupportedFilterFieldException(unsupported_filter_fields) - - candidate_service_handler_classes : Dict['_ServiceHandler', int] = None # num. 
filter hits per service_handler - for field_name, field_values in filter_fields.items(): - field_indice = self.__indices.get(field_name) - if field_indice is None: continue - if not isinstance(field_values, Iterable) or isinstance(field_values, str): - field_values = [field_values] - if len(field_values) == 0: - # do not allow empty fields; might cause wrong selection - raise UnsatisfiedFilterException(filter_fields) - - field_enum_values = FILTER_FIELD_ALLOWED_VALUES.get(field_name) - - field_candidate_service_handler_classes = set() - for field_value in field_values: - if field_enum_values is not None and field_value not in field_enum_values: - raise UnsupportedFilterFieldValueException(field_name, field_value, field_enum_values) - field_indice_service_handlers = field_indice.get(field_value) - if field_indice_service_handlers is None: continue - field_candidate_service_handler_classes = field_candidate_service_handler_classes.union( - field_indice_service_handlers) - - if candidate_service_handler_classes is None: - candidate_service_handler_classes = {k:1 for k in field_candidate_service_handler_classes} - else: - for candidate_service_handler_class in candidate_service_handler_classes: - if candidate_service_handler_class not in field_candidate_service_handler_classes: continue - candidate_service_handler_classes[candidate_service_handler_class] += 1 - - if len(candidate_service_handler_classes) == 0: raise UnsatisfiedFilterException(filter_fields) - candidate_service_handler_classes = sorted( - candidate_service_handler_classes.items(), key=operator.itemgetter(1), reverse=True) - return candidate_service_handler_classes[0][0] + filter_fields = sanitize_filter_fields( + filter_fields, service_handler_name=service_handler_name + ) + self.__service_handlers.append((service_handler_class, filter_fields)) + + + def is_service_handler_compatible( + self, service_handler_filter_fields : Dict[FilterFieldEnum, Any], + selection_filter_fields : Dict[FilterFieldEnum, Any] + ) -> bool: + # by construction empty service_handler_filter_fields are not allowed + # by construction empty selection_filter_fields are not allowed + for filter_field in SUPPORTED_FILTER_FIELDS: + service_handler_values = set(service_handler_filter_fields.get(filter_field, set())) + if service_handler_values is None : continue # means service_handler does not restrict + if len(service_handler_values) == 0: continue # means service_handler does not restrict + + selection_values = set(selection_filter_fields.get(filter_field, set())) + is_field_compatible = selection_values.issubset(service_handler_values) + if not is_field_compatible: return False + + return True + + + def get_service_handler_class(self, **selection_filter_fields) -> '_ServiceHandler': + sanitized_filter_fields = sanitize_filter_fields(selection_filter_fields) + + compatible_service_handlers : List[Tuple[Type[_ServiceHandler], Dict[FilterFieldEnum, Any]]] = [ + service_handler_class + for service_handler_class,service_handler_filter_fields in self.__service_handlers + if self.is_service_handler_compatible(service_handler_filter_fields, sanitized_filter_fields) + ] + + MSG = '[get_service_handler_class] compatible_service_handlers={:s}' + LOGGER.debug(MSG.format(str(compatible_service_handlers))) + + num_compatible = len(compatible_service_handlers) + if num_compatible == 0: + raise UnsatisfiedFilterException(selection_filter_fields) + if num_compatible > 1: + raise AmbiguousFilterException(selection_filter_fields, compatible_service_handlers) + return 
compatible_service_handlers[0] def get_common_device_drivers(drivers_per_device : List[Set[int]]) -> Set[int]: common_device_drivers = None -- GitLab From 73e315035afc959cb998d544bb498de30fcd67af Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 16 Jan 2026 12:07:43 +0000 Subject: [PATCH 11/79] End-to-end test - L2 VPN gNMI OpenConfig: - Added script to access CLI of R3 --- .../l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r3.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100755 src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r3.sh diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r3.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r3.sh new file mode 100755 index 000000000..d817ccffd --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r3.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +docker exec -it clab-l2_vpn_gnmi_oc-r3 Cli -- GitLab From 488b04dc50bcd8ddb61b06b03e0458dfcc013b93 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 16 Jan 2026 12:25:30 +0000 Subject: [PATCH 12/79] Service component - L2NM gNMI OpenConfig Service Handler: - Registered L2NM gNMI OpenConfig Service Handler in Service component --- src/service/service/service_handlers/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 1d274490f..c30d5c308 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -16,6 +16,7 @@ from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum from ..service_handler_api.FilterFields import FilterFieldEnum from .ipowdm.IpowdmServiceHandler import IpowdmServiceHandler from .l2nm_emulated.L2NMEmulatedServiceHandler import L2NMEmulatedServiceHandler +from .l2nm_gnmi_openconfig.L2NMGnmiOpenConfigServiceHandler import L2NMGnmiOpenConfigServiceHandler from .l2nm_ietfl2vpn.L2NM_IETFL2VPN_ServiceHandler import L2NM_IETFL2VPN_ServiceHandler from .l2nm_openconfig.L2NMOpenConfigServiceHandler import L2NMOpenConfigServiceHandler from .l3nm_emulated.L3NMEmulatedServiceHandler import L3NMEmulatedServiceHandler @@ -53,6 +54,12 @@ SERVICE_HANDLERS = [ FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG, } ]), + (L2NMGnmiOpenConfigServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM, + FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG, + } + ]), (L3NMEmulatedServiceHandler, [ { FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L3NM, -- GitLab From 1b8a0a92633c1807941f775e1f67e63d4e64dc0a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 16 Jan 2026 22:30:25 +0000 Subject: [PATCH 13/79] Device component - gNMI OpenConfig Driver: - Fixed methods of unitary test --- .../gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py | 2 +- 
src/device/tests/gnmi_openconfig/tools/request_composers.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py index 4c73e0889..24341f7a8 100644 --- a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py +++ b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py @@ -133,7 +133,7 @@ def test_configure_l2vpn_vpls(drivers: Dict[str, GnmiOpenConfigDriver]) -> None: """Fallback validation: create a VLAN in default VRF and attach core/access interfaces.""" for router in ROUTERS: driver = drivers[router['name']] - vlan_res = vlan('default', VLAN_ID, members=[], vlan_name='tfs-vlan') + vlan_res = vlan('default', VLAN_ID, vlan_name='tfs-vlan') if_access = interface(router['access_interface'], VLAN_ID, enabled=True, vlan_id=VLAN_ID, ipv4_address=None, ipv4_prefix=None) if_core = interface(router['core_interface'], VLAN_ID, enabled=True, vlan_id=VLAN_ID, diff --git a/src/device/tests/gnmi_openconfig/tools/request_composers.py b/src/device/tests/gnmi_openconfig/tools/request_composers.py index 198291d73..539f71ee6 100644 --- a/src/device/tests/gnmi_openconfig/tools/request_composers.py +++ b/src/device/tests/gnmi_openconfig/tools/request_composers.py @@ -110,14 +110,11 @@ def connection_point_endpoint_remote( } return str_path, str_data -def vlan(ni_name: str, vlan_id: int, members=None, vlan_name: str = None) -> Tuple[str, Dict]: - if members is None: - members = [] +def vlan(ni_name: str, vlan_id: int, vlan_name: str = None) -> Tuple[str, Dict]: str_path = '/network_instance[{:s}]/vlan[{:d}]'.format(ni_name, vlan_id) str_data = { 'name': ni_name, 'vlan_id': vlan_id, 'vlan_name': vlan_name, - 'members': members, } return str_path, str_data -- GitLab From 26d23b3ab76d8c3ddebdea0e2e02439ad95b2771 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 16 Jan 2026 22:31:45 +0000 Subject: [PATCH 14/79] Service component - L2NM gNMI OpenConfig Service Handler: - Removed unused Static Route Generator - Implemented VlanIdPropagator - Updated ConfigRuleComposer - Integrated VlanIdPropagator in Service Handler --- .../ConfigRuleComposer.py | 129 +++-------- .../L2NMGnmiOpenConfigServiceHandler.py | 14 +- .../StaticRouteGenerator.py | 215 ------------------ .../l2nm_gnmi_openconfig/VlanIdPropagator.py | 92 ++++++++ 4 files changed, 136 insertions(+), 314 deletions(-) delete mode 100644 src/service/service/service_handlers/l2nm_gnmi_openconfig/StaticRouteGenerator.py create mode 100644 src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py index cf0eacab5..5aa4ce21c 100644 --- a/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py @@ -16,6 +16,7 @@ import json, logging, netaddr, re from typing import Dict, List, Optional, Set, Tuple from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ConfigActionEnum, Device, EndPoint, Service +from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from service.service.service_handler_api.AnyTreeTools import TreeNode @@ -27,19 +28,15 @@ 
DEFAULT_NETWORK_INSTANCE = 'default' RE_IF = re.compile(r'^\/interface\[([^\]]+)\]$') RE_SUBIF = re.compile(r'^\/interface\[([^\]]+)\]\/subinterface\[([^\]]+)\]$') -RE_SR = re.compile(r'^\/network_instance\[([^\]]+)\]\/protocols\[STATIC\]/route\[([^\:]+)\:([^\]]+)\]$') def _interface( interface : str, if_type : Optional[str] = 'l3ipvlan', index : int = 0, vlan_id : Optional[int] = None, - address_ip : Optional[str] = None, address_prefix : Optional[int] = None, mtu : Optional[int] = None, - enabled : bool = True + mtu : Optional[int] = None, enabled : bool = True ) -> Tuple[str, Dict]: path = '/interface[{:s}]/subinterface[{:d}]'.format(interface, index) data = {'name': interface, 'type': if_type, 'index': index, 'enabled': enabled} if if_type is not None: data['type'] = if_type if vlan_id is not None: data['vlan_id'] = vlan_id - if address_ip is not None: data['address_ip'] = address_ip - if address_prefix is not None: data['address_prefix'] = address_prefix if mtu is not None: data['mtu'] = mtu return path, data @@ -48,24 +45,9 @@ def _network_instance(ni_name : str, ni_type : str) -> Tuple[str, Dict]: data = {'name': ni_name, 'type': ni_type} return path, data -def _network_instance_protocol(ni_name : str, protocol : str) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/protocols[{:s}]'.format(ni_name, protocol) - data = {'name': ni_name, 'identifier': protocol, 'protocol_name': protocol} - return path, data - -def _network_instance_protocol_static(ni_name : str) -> Tuple[str, Dict]: - return _network_instance_protocol(ni_name, 'STATIC') - -def _network_instance_protocol_static_route( - ni_name : str, prefix : str, next_hop : str, metric : int -) -> Tuple[str, Dict]: - protocol = 'STATIC' - path = '/network_instance[{:s}]/protocols[{:s}]/static_route[{:s}:{:d}]'.format(ni_name, protocol, prefix, metric) - index = 'AUTO_{:d}_{:s}'.format(metric, next_hop.replace('.', '-')) - data = { - 'name': ni_name, 'identifier': protocol, 'protocol_name': protocol, - 'prefix': prefix, 'index': index, 'next_hop': next_hop, 'metric': metric - } +def _network_instance_vlan(ni_name : str, vlan_id : int, vlan_name : str = None) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]/vlan[{:s}]'.format(ni_name, str(vlan_id)) + data = {'name': ni_name, 'vlan_id': vlan_id, 'vlan_name': vlan_name} return path, data def _network_instance_interface(ni_name : str, interface : str, sub_interface_index : int) -> Tuple[str, Dict]: @@ -79,8 +61,7 @@ class EndpointComposer: self.uuid = endpoint_uuid self.objekt : Optional[EndPoint] = None self.sub_interface_index = 0 - self.ipv4_address = None - self.ipv4_prefix_len = None + self.vlan_id = None def configure(self, endpoint_obj : Optional[EndPoint], settings : Optional[TreeNode]) -> None: if endpoint_obj is not None: @@ -88,27 +69,12 @@ class EndpointComposer: if settings is None: return json_settings : Dict = settings.value - if 'address_ip' in json_settings: - self.ipv4_address = json_settings['address_ip'] - elif 'ip_address' in json_settings: - self.ipv4_address = json_settings['ip_address'] - else: - MSG = 'IP Address not found. Tried: address_ip and ip_address. endpoint_obj={:s} settings={:s}' - LOGGER.warning(MSG.format(str(endpoint_obj), str(settings))) - - if 'address_prefix' in json_settings: - self.ipv4_prefix_len = json_settings['address_prefix'] - elif 'prefix_length' in json_settings: - self.ipv4_prefix_len = json_settings['prefix_length'] - else: - MSG = 'IP Address Prefix not found. Tried: address_prefix and prefix_length. 
endpoint_obj={:s} settings={:s}' - LOGGER.warning(MSG.format(str(endpoint_obj), str(settings))) + if 'vlan_id' in json_settings: + self.vlan_id = json_settings['vlan_id'] self.sub_interface_index = json_settings.get('index', 0) def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]: - if self.ipv4_address is None: return [] - if self.ipv4_prefix_len is None: return [] json_config_rule = json_config_rule_delete if delete else json_config_rule_set config_rules : List[Dict] = list() @@ -120,24 +86,21 @@ class EndpointComposer: if delete: config_rules.extend([ json_config_rule(*_interface( - self.objekt.name, index=self.sub_interface_index, address_ip=None, - address_prefix=None, enabled=False + self.objekt.name, index=self.sub_interface_index, vlan_id=self.vlan_id, enabled=False )), ]) else: config_rules.extend([ json_config_rule(*_interface( - self.objekt.name, index=self.sub_interface_index, address_ip=self.ipv4_address, - address_prefix=self.ipv4_prefix_len, enabled=True + self.objekt.name, index=self.sub_interface_index, vlan_id=self.vlan_id, enabled=True )), ]) return config_rules def dump(self) -> Dict: return { - 'index' : self.sub_interface_index, - 'address_ip' : self.ipv4_address, - 'address_prefix': self.ipv4_prefix_len, + 'index' : self.sub_interface_index, + 'vlan_id' : self.vlan_id, } def __str__(self): @@ -152,8 +115,7 @@ class DeviceComposer: self.objekt : Optional[Device] = None self.aliases : Dict[str, str] = dict() # endpoint_name => endpoint_uuid self.endpoints : Dict[str, EndpointComposer] = dict() # endpoint_uuid => EndpointComposer - self.connected : Set[str] = set() - self.static_routes : Dict[str, Dict[int, str]] = dict() # {prefix => {metric => next_hop}} + self.vlan_ids : Set[int] = set() def set_endpoint_alias(self, endpoint_name : str, endpoint_uuid : str) -> None: self.aliases[endpoint_name] = endpoint_uuid @@ -195,34 +157,12 @@ class DeviceComposer: if_name, subif_index = match.groups() if if_name in mgmt_ifaces: continue resource_value = json.loads(config_rule_custom.resource_value) - if 'address_ip' not in resource_value: continue - if 'address_prefix' not in resource_value: continue - ipv4_network = str(resource_value['address_ip']) - ipv4_prefix_len = int(resource_value['address_prefix']) + if 'vlan_id' not in resource_value: continue + vlan_id = int(resource_value['vlan_id']) + self.vlan_ids.add(vlan_id) endpoint = self.get_endpoint(if_name) - endpoint.ipv4_address = ipv4_network - endpoint.ipv4_prefix_len = ipv4_prefix_len + endpoint.vlan_id = vlan_id endpoint.sub_interface_index = int(subif_index) - endpoint_ip_network = netaddr.IPNetwork('{:s}/{:d}'.format(ipv4_network, ipv4_prefix_len)) - if '0.0.0.0/' not in str(endpoint_ip_network.cidr): - self.connected.add(str(endpoint_ip_network.cidr)) - - match = RE_SR.match(config_rule_custom.resource_key) - if match is not None: - ni_name, prefix, metric = match.groups() - if ni_name != NETWORK_INSTANCE: continue - resource_value : Dict = json.loads(config_rule_custom.resource_value) - next_hop = resource_value['next_hop'] - self.static_routes.setdefault(prefix, dict())[metric] = next_hop - - if settings is None: return - json_settings : Dict = settings.value - static_routes : List[Dict] = json_settings.get('static_routes', []) - for static_route in static_routes: - prefix = static_route['prefix'] - next_hop = static_route['next_hop'] - metric = static_route.get('metric', 0) - self.static_routes.setdefault(prefix, dict())[metric] = next_hop def get_config_rules(self, 
network_instance_name : str, delete : bool = False) -> List[Dict]: SELECTED_DEVICES = { @@ -238,17 +178,11 @@ class DeviceComposer: json_config_rule(*_network_instance(network_instance_name, 'L3VRF')) for endpoint in self.endpoints.values(): config_rules.extend(endpoint.get_config_rules(network_instance_name, delete=delete)) - if len(self.static_routes) > 0: - config_rules.append( - json_config_rule(*_network_instance_protocol_static(network_instance_name)) - ) - for prefix, metric_next_hop in self.static_routes.items(): - for metric, next_hop in metric_next_hop.items(): - config_rules.append( - json_config_rule(*_network_instance_protocol_static_route( - network_instance_name, prefix, next_hop, metric - )) - ) + for vlan_id in self.vlan_ids: + vlan_name = 'tfs-vlan-{:s}'.format(str(vlan_id)) + config_rules.append(json_config_rule(*_network_instance_vlan( + network_instance_name, vlan_id, vlan_name=vlan_name + ))) if delete: config_rules = list(reversed(config_rules)) return config_rules @@ -257,9 +191,7 @@ class DeviceComposer: 'endpoints' : { endpoint_uuid : endpoint.dump() for endpoint_uuid, endpoint in self.endpoints.items() - }, - 'connected' : list(self.connected), - 'static_routes' : self.static_routes, + } } def __str__(self): @@ -273,6 +205,7 @@ class ConfigRuleComposer: self.objekt : Optional[Service] = None self.aliases : Dict[str, str] = dict() # device_name => device_uuid self.devices : Dict[str, DeviceComposer] = dict() # device_uuid => DeviceComposer + self.vlan_id = None def set_device_alias(self, device_name : str, device_uuid : str) -> None: self.aliases[device_name] = device_uuid @@ -286,8 +219,15 @@ class ConfigRuleComposer: def configure(self, service_obj : Service, settings : Optional[TreeNode]) -> None: self.objekt = service_obj if settings is None: return - #json_settings : Dict = settings.value - # For future use + json_settings : Dict = settings.value + + if 'vlan_id' in json_settings: + self.vlan_id = json_settings['vlan_id'] + elif 'vlan-id' in json_settings: + self.vlan_id = json_settings['vlan-id'] + else: + MSG = 'VLAN ID not found. Tried: vlan_id and vlan-id. 
service_obj={:s} settings={:s}' + LOGGER.warning(MSG.format(grpc_message_to_json_string(service_obj), str(settings))) def get_config_rules( self, network_instance_name : str = NETWORK_INSTANCE, delete : bool = False @@ -302,5 +242,6 @@ class ConfigRuleComposer: 'devices' : { device_uuid : device.dump() for device_uuid, device in self.devices.items() - } + }, + 'vlan_id': self.vlan_id, } diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py index 125d4f759..baa164afa 100644 --- a/src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py @@ -25,7 +25,7 @@ from service.service.service_handler_api.Tools import get_device_endpoint_uuids, from service.service.task_scheduler.TaskExecutor import TaskExecutor from service.service.tools.EndpointIdFormatters import endpointids_to_raw from .ConfigRuleComposer import ConfigRuleComposer -from .StaticRouteGenerator import StaticRouteGenerator +from .VlanIdPropagator import VlanIdPropagator LOGGER = logging.getLogger(__name__) @@ -39,7 +39,7 @@ class L2NMGnmiOpenConfigServiceHandler(_ServiceHandler): self.__task_executor = task_executor self.__settings_handler = SettingsHandler(service.service_config, **settings) self.__config_rule_composer = ConfigRuleComposer() - self.__static_route_generator = StaticRouteGenerator(self.__config_rule_composer) + self.__vlan_id_propagator = VlanIdPropagator(self.__config_rule_composer) self.__endpoint_map : Dict[Tuple[str, str], Tuple[str, str]] = dict() def _compose_config_rules(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> None: @@ -65,9 +65,13 @@ class L2NMGnmiOpenConfigServiceHandler(_ServiceHandler): self.__endpoint_map[(device_uuid, endpoint_uuid)] = (device_obj.name, endpoint_obj.name) - LOGGER.debug('[pre] config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump()))) - self.__static_route_generator.compose(endpoints) - LOGGER.debug('[post] config_rule_composer = {:s}'.format(json.dumps(self.__config_rule_composer.dump()))) + MSG = '[pre] config_rule_composer = {:s}' + LOGGER.debug(MSG.format(json.dumps(self.__config_rule_composer.dump()))) + + self.__vlan_id_propagator.compose(endpoints) + + MSG = '[post] config_rule_composer = {:s}' + LOGGER.debug(MSG.format(json.dumps(self.__config_rule_composer.dump()))) def _do_configurations( self, config_rules_per_device : Dict[str, List[Dict]], endpoints : List[Tuple[str, str, Optional[str]]], diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/StaticRouteGenerator.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/StaticRouteGenerator.py deleted file mode 100644 index 0d0d2bf52..000000000 --- a/src/service/service/service_handlers/l2nm_gnmi_openconfig/StaticRouteGenerator.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import json, logging, netaddr, sys -from typing import List, Optional, Tuple -from .ConfigRuleComposer import ConfigRuleComposer - -LOGGER = logging.getLogger(__name__) - -# Used to infer routing networks for adjacent ports when there is no hint in device/endpoint settings -ROOT_NEIGHBOR_ROUTING_NETWORK = netaddr.IPNetwork('10.254.254.0/16') -NEIGHBOR_ROUTING_NETWORKS_PREFIX_LEN = 30 -NEIGHBOR_ROUTING_NETWORKS = set(ROOT_NEIGHBOR_ROUTING_NETWORK.subnet(NEIGHBOR_ROUTING_NETWORKS_PREFIX_LEN)) - -def _generate_neighbor_addresses() -> Tuple[netaddr.IPAddress, netaddr.IPAddress, int]: - ip_network = NEIGHBOR_ROUTING_NETWORKS.pop() - ip_addresses = list(ip_network.iter_hosts()) - ip_addresses.append(NEIGHBOR_ROUTING_NETWORKS_PREFIX_LEN) - return ip_addresses - -def _compute_gateway(ip_network : netaddr.IPNetwork, gateway_host=1) -> netaddr.IPAddress: - return netaddr.IPAddress(ip_network.cidr.first + gateway_host) - -def _compose_ipv4_network(ipv4_network, ipv4_prefix_len) -> netaddr.IPNetwork: - return netaddr.IPNetwork('{:s}/{:d}'.format(str(ipv4_network), int(ipv4_prefix_len))) - -class StaticRouteGenerator: - def __init__(self, config_rule_composer : ConfigRuleComposer) -> None: - self._config_rule_composer = config_rule_composer - - def compose(self, connection_hop_list : List[Tuple[str, str, Optional[str]]]) -> None: - link_endpoints = self._compute_link_endpoints(connection_hop_list) - LOGGER.debug('link_endpoints = {:s}'.format(str(link_endpoints))) - - self._compute_link_addresses(link_endpoints) - LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump()))) - - self._discover_connected_networks(connection_hop_list) - LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump()))) - - # Compute and propagate static routes forward (service_endpoint_a => service_endpoint_b) - self._compute_static_routes(link_endpoints) - - # Compute and propagate static routes backward (service_endpoint_b => service_endpoint_a) - reversed_endpoints = list(reversed(connection_hop_list)) - reversed_link_endpoints = self._compute_link_endpoints(reversed_endpoints) - LOGGER.debug('reversed_link_endpoints = {:s}'.format(str(reversed_link_endpoints))) - self._compute_static_routes(reversed_link_endpoints) - - LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump()))) - - def _compute_link_endpoints( - self, connection_hop_list : List[Tuple[str, str, Optional[str]]] - ) -> List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]]: - # In some cases connection_hop_list might contain repeated endpoints, remove them here. - added_connection_hops = set() - filtered_connection_hop_list = list() - for connection_hop in connection_hop_list: - if connection_hop in added_connection_hops: continue - filtered_connection_hop_list.append(connection_hop) - added_connection_hops.add(connection_hop) - connection_hop_list = filtered_connection_hop_list - - # In some cases connection_hop_list first and last items might be internal endpoints of - # devices instead of link endpoints. Filter those endpoints not reaching a new device. 
- if len(connection_hop_list) > 2 and connection_hop_list[0][0] == connection_hop_list[1][0]: - # same device on first 2 endpoints - connection_hop_list = connection_hop_list[1:] - if len(connection_hop_list) > 2 and connection_hop_list[-1][0] == connection_hop_list[-2][0]: - # same device on last 2 endpoints - connection_hop_list = connection_hop_list[:-1] - - num_connection_hops = len(connection_hop_list) - if num_connection_hops % 2 != 0: raise Exception('Number of connection hops must be even') - if num_connection_hops < 4: raise Exception('Number of connection hops must be >= 4') - - it_connection_hops = iter(connection_hop_list) - return list(zip(it_connection_hops, it_connection_hops)) - - def _compute_link_addresses( - self, link_endpoints_list : List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]] - ) -> None: - for link_endpoints in link_endpoints_list: - device_endpoint_a, device_endpoint_b = link_endpoints - - device_uuid_a, endpoint_uuid_a = device_endpoint_a[0:2] - endpoint_a = self._config_rule_composer.get_device(device_uuid_a).get_endpoint(endpoint_uuid_a) - - device_uuid_b, endpoint_uuid_b = device_endpoint_b[0:2] - endpoint_b = self._config_rule_composer.get_device(device_uuid_b).get_endpoint(endpoint_uuid_b) - - if endpoint_a.ipv4_address is None and endpoint_b.ipv4_address is None: - ip_endpoint_a, ip_endpoint_b, prefix_len = _generate_neighbor_addresses() - endpoint_a.ipv4_address = str(ip_endpoint_a) - endpoint_a.ipv4_prefix_len = prefix_len - endpoint_b.ipv4_address = str(ip_endpoint_b) - endpoint_b.ipv4_prefix_len = prefix_len - elif endpoint_a.ipv4_address is not None and endpoint_b.ipv4_address is None: - prefix_len = endpoint_a.ipv4_prefix_len - ip_network_a = _compose_ipv4_network(endpoint_a.ipv4_address, prefix_len) - if prefix_len > 30: - MSG = 'Unsupported prefix_len for {:s}: {:s}' - raise Exception(MSG.format(str(endpoint_a), str(prefix_len))) - ip_endpoint_b = _compute_gateway(ip_network_a, gateway_host=1) - if ip_endpoint_b == ip_network_a.ip: - ip_endpoint_b = _compute_gateway(ip_network_a, gateway_host=2) - endpoint_b.ipv4_address = str(ip_endpoint_b) - endpoint_b.ipv4_prefix_len = prefix_len - elif endpoint_a.ipv4_address is None and endpoint_b.ipv4_address is not None: - prefix_len = endpoint_b.ipv4_prefix_len - ip_network_b = _compose_ipv4_network(endpoint_b.ipv4_address, prefix_len) - if prefix_len > 30: - MSG = 'Unsupported prefix_len for {:s}: {:s}' - raise Exception(MSG.format(str(endpoint_b), str(prefix_len))) - ip_endpoint_a = _compute_gateway(ip_network_b, gateway_host=1) - if ip_endpoint_a == ip_network_b.ip: - ip_endpoint_a = _compute_gateway(ip_network_b, gateway_host=2) - endpoint_a.ipv4_address = str(ip_endpoint_a) - endpoint_a.ipv4_prefix_len = prefix_len - elif endpoint_a.ipv4_address is not None and endpoint_b.ipv4_address is not None: - ip_network_a = _compose_ipv4_network(endpoint_a.ipv4_address, endpoint_a.ipv4_prefix_len) - ip_network_b = _compose_ipv4_network(endpoint_b.ipv4_address, endpoint_b.ipv4_prefix_len) - if ip_network_a.cidr != ip_network_b.cidr: - MSG = 'Incompatible CIDRs: endpoint_a({:s})=>{:s} endpoint_b({:s})=>{:s}' - raise Exception(MSG.format(str(endpoint_a), str(ip_network_a), str(endpoint_b), str(ip_network_b))) - if ip_network_a.ip == ip_network_b.ip: - MSG = 'Duplicated IP: endpoint_a({:s})=>{:s} endpoint_b({:s})=>{:s}' - raise Exception(MSG.format(str(endpoint_a), str(ip_network_a), str(endpoint_b), str(ip_network_b))) - - def _discover_connected_networks(self, connection_hop_list : 
List[Tuple[str, str, Optional[str]]]) -> None: - for connection_hop in connection_hop_list: - device_uuid, endpoint_uuid = connection_hop[0:2] - device = self._config_rule_composer.get_device(device_uuid) - endpoint = device.get_endpoint(endpoint_uuid) - - if endpoint.ipv4_address is None: continue - ip_network = _compose_ipv4_network(endpoint.ipv4_address, endpoint.ipv4_prefix_len) - - if '0.0.0.0/' in str(ip_network.cidr): continue - device.connected.add(str(ip_network.cidr)) - - def _compute_static_routes( - self, link_endpoints_list : List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]] - ) -> None: - for link_endpoints in link_endpoints_list: - device_endpoint_a, device_endpoint_b = link_endpoints - - device_uuid_a, endpoint_uuid_a = device_endpoint_a[0:2] - device_a = self._config_rule_composer.get_device(device_uuid_a) - endpoint_a = device_a.get_endpoint(endpoint_uuid_a) - - device_uuid_b, endpoint_uuid_b = device_endpoint_b[0:2] - device_b = self._config_rule_composer.get_device(device_uuid_b) - endpoint_b = device_b.get_endpoint(endpoint_uuid_b) - - # Compute static routes from networks connected in device_a - for ip_network_a in device_a.connected: - if ip_network_a in device_b.connected: continue - if ip_network_a in device_b.static_routes: continue - if ip_network_a in ROOT_NEIGHBOR_ROUTING_NETWORK: continue - endpoint_a_ip_network = _compose_ipv4_network(endpoint_a.ipv4_address, endpoint_a.ipv4_prefix_len) - next_hop = str(endpoint_a_ip_network.ip) - metric = 1 - device_b.static_routes.setdefault(ip_network_a, dict())[metric] = next_hop - - # Compute static routes from networks connected in device_b - for ip_network_b in device_b.connected: - if ip_network_b in device_a.connected: continue - if ip_network_b in device_a.static_routes: continue - if ip_network_b in ROOT_NEIGHBOR_ROUTING_NETWORK: continue - endpoint_b_ip_network = _compose_ipv4_network(endpoint_b.ipv4_address, endpoint_b.ipv4_prefix_len) - next_hop = str(endpoint_b_ip_network.ip) - metric = 1 - device_a.static_routes.setdefault(ip_network_b, dict())[metric] = next_hop - - # Propagate static routes from networks connected in device_a - for ip_network_a, metric_next_hop in device_a.static_routes.items(): - if ip_network_a in device_b.connected: continue - if ip_network_a in ROOT_NEIGHBOR_ROUTING_NETWORK: continue - endpoint_a_ip_network = _compose_ipv4_network(endpoint_a.ipv4_address, endpoint_a.ipv4_prefix_len) - if ip_network_a in device_b.static_routes: - current_metric = min(device_b.static_routes[ip_network_a].keys()) - else: - current_metric = int(sys.float_info.max) - for metric, next_hop in metric_next_hop.items(): - new_metric = metric + 1 - if new_metric >= current_metric: continue - next_hop_a = str(endpoint_a_ip_network.ip) - device_b.static_routes.setdefault(ip_network_a, dict())[metric] = next_hop_a - - # Propagate static routes from networks connected in device_b - for ip_network_b in device_b.static_routes.keys(): - if ip_network_b in device_a.connected: continue - if ip_network_b in ROOT_NEIGHBOR_ROUTING_NETWORK: continue - endpoint_b_ip_network = _compose_ipv4_network(endpoint_b.ipv4_address, endpoint_b.ipv4_prefix_len) - if ip_network_b in device_a.static_routes: - current_metric = min(device_a.static_routes[ip_network_b].keys()) - else: - current_metric = int(sys.float_info.max) - for metric, next_hop in metric_next_hop.items(): - new_metric = metric + 1 - if new_metric >= current_metric: continue - next_hop_b = str(endpoint_b_ip_network.ip) - 
device_a.static_routes.setdefault(ip_network_b, dict())[metric] = next_hop_b diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py new file mode 100644 index 000000000..73addf06c --- /dev/null +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py @@ -0,0 +1,92 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import List, Optional, Tuple +from .ConfigRuleComposer import ConfigRuleComposer + +LOGGER = logging.getLogger(__name__) + +class VlanIdPropagator: + def __init__(self, config_rule_composer : ConfigRuleComposer) -> None: + self._config_rule_composer = config_rule_composer + + def compose(self, connection_hop_list : List[Tuple[str, str, Optional[str]]]) -> None: + link_endpoints = self._compute_link_endpoints(connection_hop_list) + LOGGER.debug('link_endpoints = {:s}'.format(str(link_endpoints))) + + self._propagate_vlan_id(link_endpoints) + LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump()))) + + def _compute_link_endpoints( + self, connection_hop_list : List[Tuple[str, str, Optional[str]]] + ) -> List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]]: + # In some cases connection_hop_list might contain repeated endpoints, remove them here. + added_connection_hops = set() + filtered_connection_hop_list = list() + for connection_hop in connection_hop_list: + if connection_hop in added_connection_hops: continue + filtered_connection_hop_list.append(connection_hop) + added_connection_hops.add(connection_hop) + connection_hop_list = filtered_connection_hop_list + + # In some cases connection_hop_list first and last items might be internal endpoints of + # devices instead of link endpoints. Filter those endpoints not reaching a new device. 
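+ # Illustrative example (endpoint names are hypothetical): for a DC1--R1--R2--DC2 path, a hop list such as
+ # [(dc1, int), (dc1, eth1), (r1, eth1), (r1, eth2), (r2, eth2), (r2, eth1), (dc2, eth1), (dc2, int)]
+ # loses its device-internal first and last hops and is then paired two-by-two into the per-link
+ # endpoint tuples (dc1/r1, r1/r2, r2/dc2) that _propagate_vlan_id uses to spread the service VLAN ID.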
+ if len(connection_hop_list) > 2 and connection_hop_list[0][0] == connection_hop_list[1][0]: + # same device on first 2 endpoints + connection_hop_list = connection_hop_list[1:] + if len(connection_hop_list) > 2 and connection_hop_list[-1][0] == connection_hop_list[-2][0]: + # same device on last 2 endpoints + connection_hop_list = connection_hop_list[:-1] + + num_connection_hops = len(connection_hop_list) + if num_connection_hops % 2 != 0: raise Exception('Number of connection hops must be even') + if num_connection_hops < 4: raise Exception('Number of connection hops must be >= 4') + + it_connection_hops = iter(connection_hop_list) + return list(zip(it_connection_hops, it_connection_hops)) + + def _propagate_vlan_id( + self, link_endpoints_list : List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]] + ) -> None: + for link_endpoints in link_endpoints_list: + device_endpoint_a, device_endpoint_b = link_endpoints + + device_uuid_a, endpoint_uuid_a = device_endpoint_a[0:2] + endpoint_a = self._config_rule_composer.get_device(device_uuid_a).get_endpoint(endpoint_uuid_a) + + device_uuid_b, endpoint_uuid_b = device_endpoint_b[0:2] + endpoint_b = self._config_rule_composer.get_device(device_uuid_b).get_endpoint(endpoint_uuid_b) + + svc_vlan_id = self._config_rule_composer.vlan_id + ep_a_vlan_id = endpoint_a.vlan_id + ep_b_vlan_id = endpoint_b.vlan_id + + if ep_a_vlan_id is None and ep_b_vlan_id is None: + endpoint_a.vlan_id = svc_vlan_id + endpoint_b.vlan_id = svc_vlan_id + elif ep_a_vlan_id is not None and ep_b_vlan_id is None: + if ep_a_vlan_id != svc_vlan_id: + MSG = 'Incompatible VLAN-IDs: endpoint_a({:s}), service({:s})' + raise Exception(MSG.format(str(endpoint_a), str(svc_vlan_id))) + endpoint_b.vlan_id = svc_vlan_id + elif ep_a_vlan_id is None and ep_b_vlan_id is not None: + if ep_b_vlan_id != svc_vlan_id: + MSG = 'Incompatible VLAN-IDs: endpoint_b({:s}), service({:s})' + raise Exception(MSG.format(str(endpoint_b), str(svc_vlan_id))) + endpoint_a.vlan_id = svc_vlan_id + elif ep_a_vlan_id is not None and ep_b_vlan_id is not None: + if ep_a_vlan_id != svc_vlan_id or ep_b_vlan_id != svc_vlan_id: + MSG = 'Incompatible VLAN-IDs: endpoint_a({:s}), endpoint_b({:s}), service({:s})' + raise Exception(MSG.format(str(endpoint_a), str(endpoint_b), str(svc_vlan_id))) -- GitLab From 8271c8f5845b6c2958a81f197e76b5e83a94136a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 16 Jan 2026 22:32:06 +0000 Subject: [PATCH 15/79] Service component - L3NM gNMI OpenConfig Service Handler: - Minor logging fix --- .../l3nm_gnmi_openconfig/ConfigRuleComposer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py index cf0eacab5..6857bce61 100644 --- a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py +++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py @@ -16,6 +16,7 @@ import json, logging, netaddr, re from typing import Dict, List, Optional, Set, Tuple from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ConfigActionEnum, Device, EndPoint, Service +from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from service.service.service_handler_api.AnyTreeTools import TreeNode @@ -94,7 +95,7 @@
self.ipv4_address = json_settings['ip_address'] else: MSG = 'IP Address not found. Tried: address_ip and ip_address. endpoint_obj={:s} settings={:s}' - LOGGER.warning(MSG.format(str(endpoint_obj), str(settings))) + LOGGER.warning(MSG.format(grpc_message_to_json_string(endpoint_obj), str(settings))) if 'address_prefix' in json_settings: self.ipv4_prefix_len = json_settings['address_prefix'] @@ -102,7 +103,7 @@ class EndpointComposer: self.ipv4_prefix_len = json_settings['prefix_length'] else: MSG = 'IP Address Prefix not found. Tried: address_prefix and prefix_length. endpoint_obj={:s} settings={:s}' - LOGGER.warning(MSG.format(str(endpoint_obj), str(settings))) + LOGGER.warning(MSG.format(grpc_message_to_json_string(endpoint_obj), str(settings))) self.sub_interface_index = json_settings.get('index', 0) -- GitLab From 726462087125bd857084c0558d238683ff213fd5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 16 Jan 2026 22:42:26 +0000 Subject: [PATCH 16/79] End-to-end test - L2 VPN gNMI OpenConfig: - Updated TFS service descriptor --- src/tests/l2_vpn_gnmi_oc/data/tfs-service.json | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service.json index f7b589e04..33163741d 100644 --- a/src/tests/l2_vpn_gnmi_oc/data/tfs-service.json +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-service.json @@ -9,7 +9,13 @@ "service_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}}, {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}} - ] + ], + "service_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": { + "resource_key": "/settings", + "resource_value": {"vlan_id": 125} + }} + ]} } ] } -- GitLab From 12a685f0b0aa83922fe7468bdda81c80ea0ff35d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sat, 17 Jan 2026 17:51:13 +0000 Subject: [PATCH 17/79] Service component - L2NM gNMI OpenConfig Service Handler: - Fixed VlanIdPropagator - Fixed ConfigRuleComposer --- .../ConfigRuleComposer.py | 33 ++++++++++++------- .../l2nm_gnmi_openconfig/VlanIdPropagator.py | 10 ++++-- 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py index 5aa4ce21c..03ae1211b 100644 --- a/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py @@ -60,7 +60,6 @@ class EndpointComposer: def __init__(self, endpoint_uuid : str) -> None: self.uuid = endpoint_uuid self.objekt : Optional[EndPoint] = None - self.sub_interface_index = 0 self.vlan_id = None def configure(self, endpoint_obj : Optional[EndPoint], settings : Optional[TreeNode]) -> None: @@ -71,35 +70,42 @@ class EndpointComposer: if 'vlan_id' in json_settings: self.vlan_id = json_settings['vlan_id'] - - self.sub_interface_index = json_settings.get('index', 0) + elif 'vlan-id' in json_settings: + self.vlan_id = json_settings['vlan-id'] + else: + MSG = 'VLAN ID not found. Tried: vlan_id and vlan-id. 
endpoint_obj={:s} settings={:s}' + LOGGER.warning(MSG.format(grpc_message_to_json_string(endpoint_obj), str(settings))) def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]: + config_rules : List[Dict] = list() + if self.vlan_id is None: + MSG = 'VLAN ID not defined for endpoint_obj={:s}' + LOGGER.warning(MSG.format(grpc_message_to_json_string(self.objekt))) + return config_rules + json_config_rule = json_config_rule_delete if delete else json_config_rule_set - config_rules : List[Dict] = list() if network_instance_name != DEFAULT_NETWORK_INSTANCE: config_rules.append(json_config_rule(*_network_instance_interface( - network_instance_name, self.objekt.name, self.sub_interface_index + network_instance_name, self.objekt.name, self.vlan_id ))) if delete: config_rules.extend([ json_config_rule(*_interface( - self.objekt.name, index=self.sub_interface_index, vlan_id=self.vlan_id, enabled=False + self.objekt.name, index=self.vlan_id, vlan_id=self.vlan_id, enabled=False )), ]) else: config_rules.extend([ json_config_rule(*_interface( - self.objekt.name, index=self.sub_interface_index, vlan_id=self.vlan_id, enabled=True + self.objekt.name, index=self.vlan_id, vlan_id=self.vlan_id, enabled=True )), ]) return config_rules def dump(self) -> Dict: return { - 'index' : self.sub_interface_index, 'vlan_id' : self.vlan_id, } @@ -154,7 +160,7 @@ class DeviceComposer: match = RE_SUBIF.match(config_rule_custom.resource_key) if match is not None: - if_name, subif_index = match.groups() + if_name, _ = match.groups() if if_name in mgmt_ifaces: continue resource_value = json.loads(config_rule_custom.resource_value) if 'vlan_id' not in resource_value: continue @@ -162,7 +168,6 @@ class DeviceComposer: self.vlan_ids.add(vlan_id) endpoint = self.get_endpoint(if_name) endpoint.vlan_id = vlan_id - endpoint.sub_interface_index = int(subif_index) def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]: SELECTED_DEVICES = { @@ -191,7 +196,8 @@ class DeviceComposer: 'endpoints' : { endpoint_uuid : endpoint.dump() for endpoint_uuid, endpoint in self.endpoints.items() - } + }, + 'vlan_ids' : list(self.vlan_ids) } def __str__(self): @@ -232,6 +238,11 @@ class ConfigRuleComposer: def get_config_rules( self, network_instance_name : str = NETWORK_INSTANCE, delete : bool = False ) -> Dict[str, List[Dict]]: + if self.vlan_id is None: + MSG = 'VLAN ID not defined for service_obj={:s}' + LOGGER.warning(MSG.format(grpc_message_to_json_string(self.objekt))) + return dict() + return { device_uuid : device.get_config_rules(network_instance_name, delete=delete) for device_uuid, device in self.devices.items() diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py index 73addf06c..da810651f 100644 --- a/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py @@ -64,27 +64,33 @@ class VlanIdPropagator: device_endpoint_a, device_endpoint_b = link_endpoints device_uuid_a, endpoint_uuid_a = device_endpoint_a[0:2] - endpoint_a = self._config_rule_composer.get_device(device_uuid_a).get_endpoint(endpoint_uuid_a) + device_a = self._config_rule_composer.get_device(device_uuid_a) + endpoint_a = device_a.get_endpoint(endpoint_uuid_a) device_uuid_b, endpoint_uuid_b = device_endpoint_b[0:2] - endpoint_b = 
self._config_rule_composer.get_device(device_uuid_b).get_endpoint(endpoint_uuid_b) + device_b = self._config_rule_composer.get_device(device_uuid_b) + endpoint_b = device_b.get_endpoint(endpoint_uuid_b) svc_vlan_id = self._config_rule_composer.vlan_id ep_a_vlan_id = endpoint_a.vlan_id ep_b_vlan_id = endpoint_b.vlan_id if ep_a_vlan_id is None and ep_b_vlan_id is None: + device_a.vlan_ids.add(svc_vlan_id) endpoint_a.vlan_id = svc_vlan_id + device_b.vlan_ids.add(svc_vlan_id) endpoint_b.vlan_id = svc_vlan_id elif ep_a_vlan_id is not None and ep_b_vlan_id is None: if ep_a_vlan_id != svc_vlan_id: MSG = 'Incompatible VLAN-IDs: endpoint_a({:s}), service({:s})' raise Exception(MSG.format(str(endpoint_a), str(svc_vlan_id))) + device_b.vlan_ids.add(svc_vlan_id) endpoint_b.vlan_id = svc_vlan_id elif ep_a_vlan_id is None and ep_b_vlan_id is not None: if ep_b_vlan_id != svc_vlan_id: MSG = 'Incompatible VLAN-IDs: endpoint_b({:s}), service({:s})' raise Exception(MSG.format(str(endpoint_b), str(svc_vlan_id))) + device_a.vlan_ids.add(svc_vlan_id) endpoint_a.vlan_id = svc_vlan_id elif ep_a_vlan_id is not None and ep_b_vlan_id is not None: if ep_a_vlan_id != svc_vlan_id or ep_b_vlan_id != svc_vlan_id: -- GitLab From 59ed1c7924a80b587beba7d7aa5bc3fe70ad2bc8 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sat, 17 Jan 2026 17:51:44 +0000 Subject: [PATCH 18/79] End-to-end test - L2 VPN gNMI OpenConfig: - Updated TFS service descriptors (VLAN 100 and VLAN 125) --- .../data/tfs-service-vlan-100.json | 21 +++++++++++++++++++ ...service.json => tfs-service-vlan-125.json} | 2 +- 2 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100.json rename src/tests/l2_vpn_gnmi_oc/data/{tfs-service.json => tfs-service-vlan-125.json} (94%) diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100.json new file mode 100644 index 000000000..0de688f2c --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100.json @@ -0,0 +1,21 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l2vpn-svc-100"} + }, + "service_type": "SERVICETYPE_L2NM", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}}, + {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}} + ], + "service_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": { + "resource_key": "/settings", + "resource_value": {"vlan_id": 100} + }} + ]} + } + ] +} diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125.json similarity index 94% rename from src/tests/l2_vpn_gnmi_oc/data/tfs-service.json rename to src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125.json index 33163741d..f70319555 100644 --- a/src/tests/l2_vpn_gnmi_oc/data/tfs-service.json +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125.json @@ -2,7 +2,7 @@ "services": [ { "service_id": { - "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l2vpn-svc"} + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l2vpn-svc-125"} }, "service_type": "SERVICETYPE_L2NM", "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, -- GitLab From 647ad1b7fde4674110ed98f79e262ee21e9406d5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sat, 17 
Jan 2026 21:06:12 +0000 Subject: [PATCH 19/79] Device component - gNMI OpenConfig Driver: - Implemented Interface Switched Vlan handler --- .../handlers/InterfaceSwitchedVlan.py | 104 ++++++++++++++++++ .../gnmi_openconfig/handlers/__init__.py | 7 ++ 2 files changed, 111 insertions(+) create mode 100644 src/device/service/drivers/gnmi_openconfig/handlers/InterfaceSwitchedVlan.py diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/InterfaceSwitchedVlan.py b/src/device/service/drivers/gnmi_openconfig/handlers/InterfaceSwitchedVlan.py new file mode 100644 index 000000000..7a782b881 --- /dev/null +++ b/src/device/service/drivers/gnmi_openconfig/handlers/InterfaceSwitchedVlan.py @@ -0,0 +1,104 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, re +from typing import Any, Dict, List, Tuple +from ._Handler import _Handler +from .Tools import get_str +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +RE_IF_SWITCHED_VLAN = re.compile(r'^/interface\[(?:name=)?([^\]]+)\]/ethernet/switched-vlan$') + +class InterfaceSwitchedVlanHandler(_Handler): + def get_resource_key(self) -> str: return '/interface/ethernet/switched-vlan' + def get_path(self) -> str: return '/openconfig-interfaces:interfaces/interface/ethernet/switched-vlan' + + def _get_interface_name(self, resource_key : str, resource_value : Dict) -> str: + if 'name' in resource_value: + return get_str(resource_value, 'name') + if 'interface' in resource_value: + return get_str(resource_value, 'interface') + match = RE_IF_SWITCHED_VLAN.match(resource_key) + if match is None: + MSG = 'Interface name not found in resource_key={:s} resource_value={:s}' + raise Exception(MSG.format(str(resource_key), str(resource_value))) + return match.groups()[0] + + def _normalize_config(self, resource_value : Dict) -> Dict[str, Any]: + config = resource_value.get('config') + if isinstance(config, dict): + return config + + interface_mode = resource_value.get('interface-mode', resource_value.get('interface_mode')) + if interface_mode is None: + raise Exception('interface-mode is required for switched-vlan config') + interface_mode = str(interface_mode).upper() + + config = {'interface-mode': interface_mode} + if interface_mode == 'ACCESS': + access_vlan = resource_value.get('access-vlan', resource_value.get('access_vlan')) + if access_vlan is None: + raise Exception('access-vlan is required for ACCESS mode') + config['access-vlan'] = int(access_vlan) + elif interface_mode == 'TRUNK': + native_vlan = resource_value.get('native-vlan', resource_value.get('native_vlan', 1)) + config['native-vlan'] = int(native_vlan) + trunk_vlans = resource_value.get('trunk-vlans', resource_value.get('trunk_vlans')) + if trunk_vlans is None: + trunk_vlan = resource_value.get('trunk-vlan', resource_value.get('trunk_vlan')) + trunk_vlans = [trunk_vlan] if trunk_vlan is not None else [] + if not isinstance(trunk_vlans, list): + trunk_vlans = 
[trunk_vlans] + config['trunk-vlans'] = [int(vlan) for vlan in trunk_vlans if vlan is not None] + else: + raise Exception('Unsupported interface-mode: {:s}'.format(str(interface_mode))) + + return config + + def compose( + self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False + ) -> Tuple[str, str]: + if_name = self._get_interface_name(resource_key, resource_value) + str_path = '/interfaces/interface[name={:s}]/ethernet/switched-vlan'.format(if_name) + if delete: + return str_path, json.dumps({}) + + config = self._normalize_config(resource_value) + str_data = json.dumps({'config': config}) + return str_path, str_data + + def parse( + self, json_data : Dict, yang_handler : YangHandler + ) -> List[Tuple[str, Dict[str, Any]]]: + json_data_valid = yang_handler.parse_to_dict( + '/openconfig-interfaces:interfaces', json_data, fmt='json', strict=False + ) + + entries = [] + for interface in json_data_valid.get('interfaces', {}).get('interface', []): + interface_name = interface['name'] + ethernet = interface.get('ethernet', {}) + switched_vlan = ethernet.get('switched-vlan') + if switched_vlan is None: + continue + entry_key = '/interface[{:s}]/ethernet/switched-vlan'.format(interface_name) + entry_value = {} + if 'config' in switched_vlan: + entry_value['config'] = switched_vlan['config'] + if 'state' in switched_vlan: + entry_value['state'] = switched_vlan['state'] + entries.append((entry_key, entry_value)) + return entries diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py b/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py index 61ce7e675..ab09a6c54 100644 --- a/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py +++ b/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py @@ -18,6 +18,7 @@ from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTER from ._Handler import _Handler from .Component import ComponentHandler from .Interface import InterfaceHandler +from .InterfaceSwitchedVlan import InterfaceSwitchedVlanHandler from .InterfaceCounter import InterfaceCounterHandler from .NetworkInstance import NetworkInstanceHandler from .NetworkInstanceInterface import NetworkInstanceInterfaceHandler @@ -35,6 +36,7 @@ LOGGER = logging.getLogger(__name__) comph = ComponentHandler() ifaceh = InterfaceHandler() +ifsvh = InterfaceSwitchedVlanHandler() ifctrh = InterfaceCounterHandler() nih = NetworkInstanceHandler() niifh = NetworkInstanceInterfaceHandler() @@ -58,6 +60,7 @@ RESOURCE_KEY_MAPPER = { RESOURCE_INTERFACES : ifaceh.get_resource_key(), RESOURCE_NETWORK_INSTANCES : nih.get_resource_key(), '/interface' : ifaceh.get_resource_key(), + '/interface/ethernet/switched-vlan' : ifsvh.get_resource_key(), '/mpls' : mplsh.get_resource_key(), '/network_instance/vlan' : nivlh.get_resource_key(), '/mpls/interface' : mplsh.get_resource_key(), @@ -68,6 +71,8 @@ PATH_MAPPER = { '/components' : comph.get_path(), '/components/component' : comph.get_path(), '/interfaces' : ifaceh.get_path(), + '/interfaces/interface/ethernet/switched-vlan' + : ifsvh.get_path(), '/network-instances' : nih.get_path(), '/network-instances/network-instance/connection-points/connection-point' : nicph.get_path(), @@ -84,6 +89,7 @@ PATH_MAPPER = { RESOURCE_KEY_TO_HANDLER = { comph.get_resource_key() : comph, ifaceh.get_resource_key() : ifaceh, + ifsvh.get_resource_key() : ifsvh, ifctrh.get_resource_key() : ifctrh, nih.get_resource_key() : nih, niifh.get_resource_key() : niifh, @@ -99,6 +105,7 @@ 
RESOURCE_KEY_TO_HANDLER = { PATH_TO_HANDLER = { comph.get_path() : comph, ifaceh.get_path() : ifaceh, + ifsvh.get_path() : ifsvh, ifctrh.get_path() : ifctrh, nih.get_path() : nih, niifh.get_path() : niifh, -- GitLab From 9433813c2ab760ba33687b75b8844f47d844f84d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sat, 17 Jan 2026 21:07:12 +0000 Subject: [PATCH 20/79] Service component - L2NM gNMI OpenConfig Service Handler: - Fixed VlanIdPropagator and ConfigRuleComposer to handle switched VLANs --- .../ConfigRuleComposer.py | 185 +++++++++--------- .../l2nm_gnmi_openconfig/VlanIdPropagator.py | 39 ++-- 2 files changed, 105 insertions(+), 119 deletions(-) diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py index 03ae1211b..2fd5f17d4 100644 --- a/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, logging, netaddr, re +import json, logging from typing import Dict, List, Optional, Set, Tuple from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import ConfigActionEnum, Device, EndPoint, Service +from common.proto.context_pb2 import Device, EndPoint, Service from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from service.service.service_handler_api.AnyTreeTools import TreeNode @@ -26,19 +26,26 @@ LOGGER = logging.getLogger(__name__) NETWORK_INSTANCE = 'default' DEFAULT_NETWORK_INSTANCE = 'default' -RE_IF = re.compile(r'^\/interface\[([^\]]+)\]$') -RE_SUBIF = re.compile(r'^\/interface\[([^\]]+)\]\/subinterface\[([^\]]+)\]$') +def _safe_int(value: Optional[object]) -> Optional[int]: + try: + return int(value) if value is not None else None + except (TypeError, ValueError): + return None -def _interface( - interface : str, if_type : Optional[str] = 'l3ipvlan', index : int = 0, vlan_id : Optional[int] = None, - mtu : Optional[int] = None, enabled : bool = True +def _interface_switched_vlan( + interface : str, interface_mode : str, access_vlan_id : Optional[int] = None, + trunk_vlan_id : Optional[int] = None, native_vlan : int = 1 ) -> Tuple[str, Dict]: - path = '/interface[{:s}]/subinterface[{:d}]'.format(interface, index) - data = {'name': interface, 'type': if_type, 'index': index, 'enabled': enabled} - if if_type is not None: data['type'] = if_type - if vlan_id is not None: data['vlan_id'] = vlan_id - if mtu is not None: data['mtu'] = mtu - return path, data + path = '/interface[{:s}]/ethernet/switched-vlan'.format(interface) + config : Dict[str, object] = {'interface-mode': interface_mode} + if interface_mode == 'ACCESS': + if access_vlan_id is not None: + config['access-vlan'] = access_vlan_id + elif interface_mode == 'TRUNK': + config['native-vlan'] = native_vlan + if trunk_vlan_id is not None: + config['trunk-vlans'] = [trunk_vlan_id] + return path, {'config': config} def _network_instance(ni_name : str, ni_type : str) -> Tuple[str, Dict]: path = '/network_instance[{:s}]'.format(ni_name) @@ -50,65 +57,75 @@ def _network_instance_vlan(ni_name : str, vlan_id : int, vlan_name : str = None) data = {'name': ni_name, 'vlan_id': vlan_id, 'vlan_name': vlan_name} return path, data -def 
_network_instance_interface(ni_name : str, interface : str, sub_interface_index : int) -> Tuple[str, Dict]: - sub_interface_name = '{:s}.{:d}'.format(interface, sub_interface_index) - path = '/network_instance[{:s}]/interface[{:s}]'.format(ni_name, sub_interface_name) - data = {'name': ni_name, 'id': sub_interface_name, 'interface': interface, 'subinterface': sub_interface_index} - return path, data class EndpointComposer: def __init__(self, endpoint_uuid : str) -> None: self.uuid = endpoint_uuid self.objekt : Optional[EndPoint] = None - self.vlan_id = None + self.explicit_vlan_ids : Set[int] = set() + self.force_trunk = False + + def _add_vlan_id(self, vlan_id : Optional[int]) -> None: + if vlan_id is not None: + self.explicit_vlan_ids.add(vlan_id) + + def _configure_from_settings(self, json_settings : Dict) -> None: + if not isinstance(json_settings, dict): + return + vlan_id = _safe_int(json_settings.get('vlan_id', json_settings.get('vlan-id'))) + self._add_vlan_id(vlan_id) def configure(self, endpoint_obj : Optional[EndPoint], settings : Optional[TreeNode]) -> None: if endpoint_obj is not None: self.objekt = endpoint_obj if settings is None: return - json_settings : Dict = settings.value - if 'vlan_id' in json_settings: - self.vlan_id = json_settings['vlan_id'] - elif 'vlan-id' in json_settings: - self.vlan_id = json_settings['vlan-id'] - else: - MSG = 'VLAN ID not found. Tried: vlan_id and vlan-id. endpoint_obj={:s} settings={:s}' - LOGGER.warning(MSG.format(grpc_message_to_json_string(endpoint_obj), str(settings))) - - def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]: + json_settings : Dict = settings.value or dict() + self._configure_from_settings(json_settings) + for child in settings.children: + if isinstance(child.value, dict): + self._configure_from_settings(child.value) + + def set_force_trunk(self, enable : bool = True) -> None: + self.force_trunk = enable + + def _select_trunk_vlan_id(self, service_vlan_id : int) -> int: + if service_vlan_id in self.explicit_vlan_ids: + return service_vlan_id + if len(self.explicit_vlan_ids) > 0: + return sorted(self.explicit_vlan_ids)[0] + return service_vlan_id + + def get_vlan_ids(self) -> Set[int]: + return set(self.explicit_vlan_ids) + + def has_vlan(self, vlan_id : int) -> bool: + return vlan_id in self.get_vlan_ids() + + def get_config_rules(self, service_vlan_id : int, delete : bool = False) -> List[Dict]: + if self.objekt is None: + MSG = 'Endpoint object not defined for uuid={:s}' + LOGGER.warning(MSG.format(self.uuid)) + return [] config_rules : List[Dict] = list() - if self.vlan_id is None: - MSG = 'VLAN ID not defined for endpoint_obj={:s}' - LOGGER.warning(MSG.format(grpc_message_to_json_string(self.objekt))) - return config_rules - json_config_rule = json_config_rule_delete if delete else json_config_rule_set - - if network_instance_name != DEFAULT_NETWORK_INSTANCE: - config_rules.append(json_config_rule(*_network_instance_interface( - network_instance_name, self.objekt.name, self.vlan_id + if self.force_trunk or len(self.explicit_vlan_ids) > 0: + trunk_vlan_id = self._select_trunk_vlan_id(service_vlan_id) + config_rules.append(json_config_rule(*_interface_switched_vlan( + self.objekt.name, 'TRUNK', trunk_vlan_id=trunk_vlan_id ))) - - if delete: - config_rules.extend([ - json_config_rule(*_interface( - self.objekt.name, index=self.vlan_id, vlan_id=self.vlan_id, enabled=False - )), - ]) else: - config_rules.extend([ - json_config_rule(*_interface( - self.objekt.name, 
index=self.vlan_id, vlan_id=self.vlan_id, enabled=True - )), - ]) + config_rules.append(json_config_rule(*_interface_switched_vlan( + self.objekt.name, 'ACCESS', access_vlan_id=service_vlan_id + ))) return config_rules def dump(self) -> Dict: return { - 'vlan_id' : self.vlan_id, + 'explicit_vlan_ids' : list(self.explicit_vlan_ids), + 'force_trunk' : self.force_trunk, } - + def __str__(self): data = {'uuid': self.uuid} if self.objekt is not None: data['name'] = self.objekt.name @@ -132,6 +149,10 @@ class DeviceComposer: self.endpoints[endpoint_uuid] = EndpointComposer(endpoint_uuid) return self.endpoints[endpoint_uuid] + def _refresh_vlan_ids(self, service_vlan_id : int) -> None: + # Only keep the service VLAN; others are ignored for composition + self.vlan_ids = {service_vlan_id} + def configure(self, device_obj : Device, settings : Optional[TreeNode]) -> None: self.objekt = device_obj for endpoint_obj in device_obj.device_endpoints: @@ -139,37 +160,9 @@ class DeviceComposer: self.set_endpoint_alias(endpoint_obj.name, endpoint_uuid) self.get_endpoint(endpoint_obj.name).configure(endpoint_obj, None) - # Find management interfaces - mgmt_ifaces = set() - for config_rule in device_obj.device_config.config_rules: - if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue - if config_rule.WhichOneof('config_rule') != 'custom': continue - config_rule_custom = config_rule.custom - match = RE_IF.match(config_rule_custom.resource_key) - if match is None: continue - if_name = match.groups()[0] - resource_value = json.loads(config_rule_custom.resource_value) - management = resource_value.get('management', False) - if management: mgmt_ifaces.add(if_name) - - # Find data plane interfaces - for config_rule in device_obj.device_config.config_rules: - if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue - if config_rule.WhichOneof('config_rule') != 'custom': continue - config_rule_custom = config_rule.custom - - match = RE_SUBIF.match(config_rule_custom.resource_key) - if match is not None: - if_name, _ = match.groups() - if if_name in mgmt_ifaces: continue - resource_value = json.loads(config_rule_custom.resource_value) - if 'vlan_id' not in resource_value: continue - vlan_id = int(resource_value['vlan_id']) - self.vlan_ids.add(vlan_id) - endpoint = self.get_endpoint(if_name) - endpoint.vlan_id = vlan_id - - def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]: + def get_config_rules( + self, network_instance_name : str, service_vlan_id : int, delete : bool = False + ) -> List[Dict]: SELECTED_DEVICES = { DeviceTypeEnum.PACKET_POP.value, DeviceTypeEnum.PACKET_ROUTER.value, @@ -179,11 +172,12 @@ class DeviceComposer: json_config_rule = json_config_rule_delete if delete else json_config_rule_set config_rules : List[Dict] = list() + self._refresh_vlan_ids(service_vlan_id) if network_instance_name != DEFAULT_NETWORK_INSTANCE: json_config_rule(*_network_instance(network_instance_name, 'L3VRF')) for endpoint in self.endpoints.values(): - config_rules.extend(endpoint.get_config_rules(network_instance_name, delete=delete)) - for vlan_id in self.vlan_ids: + config_rules.extend(endpoint.get_config_rules(service_vlan_id, delete=delete)) + for vlan_id in sorted(self.vlan_ids): vlan_name = 'tfs-vlan-{:s}'.format(str(vlan_id)) config_rules.append(json_config_rule(*_network_instance_vlan( network_instance_name, vlan_id, vlan_name=vlan_name @@ -224,27 +218,30 @@ class ConfigRuleComposer: def configure(self, service_obj : Service, settings : 
Optional[TreeNode]) -> None: self.objekt = service_obj - if settings is None: return - json_settings : Dict = settings.value + if settings is None: + raise Exception('Service settings are required to extract vlan_id') + json_settings : Dict = settings.value or dict() if 'vlan_id' in json_settings: - self.vlan_id = json_settings['vlan_id'] + self.vlan_id = _safe_int(json_settings['vlan_id']) elif 'vlan-id' in json_settings: - self.vlan_id = json_settings['vlan-id'] + self.vlan_id = _safe_int(json_settings['vlan-id']) else: MSG = 'VLAN ID not found. Tried: vlan_id and vlan-id. service_obj={:s} settings={:s}' - LOGGER.warning(MSG.format(grpc_message_to_json_string(service_obj), str(settings))) + raise Exception(MSG.format(grpc_message_to_json_string(service_obj), str(settings))) + + if self.vlan_id is None: + MSG = 'Invalid VLAN ID value in service settings: {:s}' + raise Exception(MSG.format(str(json_settings))) def get_config_rules( self, network_instance_name : str = NETWORK_INSTANCE, delete : bool = False ) -> Dict[str, List[Dict]]: if self.vlan_id is None: - MSG = 'VLAN ID not defined for service_obj={:s}' - LOGGER.warning(MSG.format(grpc_message_to_json_string(self.objekt))) - return dict() + raise Exception('VLAN ID must be configured at service level before composing rules') return { - device_uuid : device.get_config_rules(network_instance_name, delete=delete) + device_uuid : device.get_config_rules(network_instance_name, self.vlan_id, delete=delete) for device_uuid, device in self.devices.items() } diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py index da810651f..69e2afb62 100644 --- a/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py @@ -14,6 +14,7 @@ import json, logging from typing import List, Optional, Tuple +from common.DeviceTypes import DeviceTypeEnum from .ConfigRuleComposer import ConfigRuleComposer LOGGER = logging.getLogger(__name__) @@ -21,6 +22,16 @@ LOGGER = logging.getLogger(__name__) class VlanIdPropagator: def __init__(self, config_rule_composer : ConfigRuleComposer) -> None: self._config_rule_composer = config_rule_composer + self._router_types = { + DeviceTypeEnum.PACKET_ROUTER.value, + DeviceTypeEnum.EMULATED_PACKET_ROUTER.value, + DeviceTypeEnum.PACKET_POP.value, + DeviceTypeEnum.PACKET_RADIO_ROUTER.value, + DeviceTypeEnum.EMULATED_PACKET_RADIO_ROUTER.value, + } + + def _is_router_device(self, device) -> bool: + return device.objekt is not None and device.objekt.device_type in self._router_types def compose(self, connection_hop_list : List[Tuple[str, str, Optional[str]]]) -> None: link_endpoints = self._compute_link_endpoints(connection_hop_list) @@ -71,28 +82,6 @@ class VlanIdPropagator: device_b = self._config_rule_composer.get_device(device_uuid_b) endpoint_b = device_b.get_endpoint(endpoint_uuid_b) - svc_vlan_id = self._config_rule_composer.vlan_id - ep_a_vlan_id = endpoint_a.vlan_id - ep_b_vlan_id = endpoint_b.vlan_id - - if ep_a_vlan_id is None and ep_b_vlan_id is None: - device_a.vlan_ids.add(svc_vlan_id) - endpoint_a.vlan_id = svc_vlan_id - device_b.vlan_ids.add(svc_vlan_id) - endpoint_b.vlan_id = svc_vlan_id - elif ep_a_vlan_id is not None and ep_b_vlan_id is None: - if ep_a_vlan_id != svc_vlan_id: - MSG = 'Incompatible VLAN-IDs: endpoint_a({:s}), service({:s})' - raise Exception(MSG.format(str(endpoint_a), 
str(svc_vlan_id))) - device_b.vlan_ids.add(svc_vlan_id) - endpoint_b.vlan_id = svc_vlan_id - elif ep_a_vlan_id is None and ep_b_vlan_id is not None: - if ep_b_vlan_id != svc_vlan_id: - MSG = 'Incompatible VLAN-IDs: endpoint_b({:s}), service({:s})' - raise Exception(MSG.format(str(endpoint_b), str(svc_vlan_id))) - device_a.vlan_ids.add(svc_vlan_id) - endpoint_a.vlan_id = svc_vlan_id - elif ep_a_vlan_id is not None and ep_b_vlan_id is not None: - if ep_a_vlan_id != svc_vlan_id or ep_b_vlan_id != svc_vlan_id: - MSG = 'Incompatible VLAN-IDs: endpoint_a({:s}), endpoint_a({:s}), service({:s})' - raise Exception(MSG.format(str(endpoint_a), str(endpoint_b), str(svc_vlan_id))) + if self._is_router_device(device_a) and self._is_router_device(device_b): + endpoint_a.set_force_trunk() + endpoint_b.set_force_trunk() -- GitLab From c9190bc4290a41adcdf462c82bfd7fa95939c0a9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sat, 17 Jan 2026 21:35:40 +0000 Subject: [PATCH 21/79] End-to-end test - L2 VPN gNMI OpenConfig: - Updated TFS service descriptors (VLAN 100 and VLAN 125) tagged and untagged --- .../clab/l2_vpn_gnmi_oc.clab.yml | 6 ++++++ .../data/tfs-service-vlan-100-tagged.json | 21 +++++++++++++++++++ ...son => tfs-service-vlan-100-untagged.json} | 0 .../data/tfs-service-vlan-125-tagged.json | 21 +++++++++++++++++++ ...son => tfs-service-vlan-125-untagged.json} | 0 5 files changed, 48 insertions(+) create mode 100644 src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-tagged.json rename src/tests/l2_vpn_gnmi_oc/data/{tfs-service-vlan-100.json => tfs-service-vlan-100-untagged.json} (100%) create mode 100644 src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-tagged.json rename src/tests/l2_vpn_gnmi_oc/data/{tfs-service-vlan-125.json => tfs-service-vlan-125-untagged.json} (100%) diff --git a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml index 75cc2c90c..7374b92e0 100644 --- a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml +++ b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml @@ -60,6 +60,9 @@ topology: - ip link add link eth1 name eth1.100 type vlan id 100 - ip addr add 172.16.1.10/24 dev eth1.100 - ip link set eth1.100 up + - ip link add link eth1 name eth1.125 type vlan id 125 + - ip addr add 172.16.2.10/24 dev eth1.125 + - ip link set eth1.125 up dc2: kind: linux @@ -69,6 +72,9 @@ topology: - ip link add link eth1 name eth1.100 type vlan id 100 - ip addr add 172.16.1.20/24 dev eth1.100 - ip link set eth1.100 up + - ip link add link eth1 name eth1.125 type vlan id 125 + - ip addr add 172.16.2.20/24 dev eth1.125 + - ip link set eth1.125 up links: - endpoints: ["r1:eth2", "r2:eth1"] diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-tagged.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-tagged.json new file mode 100644 index 000000000..1af3153ce --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-tagged.json @@ -0,0 +1,21 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l2vpn-svc-100"} + }, + "service_type": "SERVICETYPE_L2NM", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}}, + {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}} + ], + "service_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": { + "resource_key": "/settings", + 
"resource_value": {"vlan_id": 100, "access_vlan_tagged": true} + }} + ]} + } + ] +} diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-untagged.json similarity index 100% rename from src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100.json rename to src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-untagged.json diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-tagged.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-tagged.json new file mode 100644 index 000000000..c5cc6b5e9 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-tagged.json @@ -0,0 +1,21 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l2vpn-svc-125"} + }, + "service_type": "SERVICETYPE_L2NM", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}}, + {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}} + ], + "service_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": { + "resource_key": "/settings", + "resource_value": {"vlan_id": 125, "access_vlan_tagged": true} + }} + ]} + } + ] +} diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-untagged.json similarity index 100% rename from src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125.json rename to src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-untagged.json -- GitLab From 8781758327c618bc034ba916602853dc72e63945 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sat, 17 Jan 2026 21:36:15 +0000 Subject: [PATCH 22/79] Service component - L2NM gNMI OpenConfig Service Handler: - Enabled force of explicit tagging on endpoints towards clients --- .../ConfigRuleComposer.py | 47 +++++++++++++++++-- 1 file changed, 42 insertions(+), 5 deletions(-) diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py index 2fd5f17d4..749591709 100644 --- a/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py @@ -32,6 +32,21 @@ def _safe_int(value: Optional[object]) -> Optional[int]: except (TypeError, ValueError): return None +def _safe_bool(value: Optional[object]) -> Optional[bool]: + if value is None: + return None + if isinstance(value, bool): + return value + if isinstance(value, (int, float)): + return bool(value) + if isinstance(value, str): + lowered = value.strip().lower() + if lowered in {'true', '1', 'yes', 'y', 'on', 'tagged'}: + return True + if lowered in {'false', '0', 'no', 'n', 'off', 'untagged'}: + return False + return None + def _interface_switched_vlan( interface : str, interface_mode : str, access_vlan_id : Optional[int] = None, trunk_vlan_id : Optional[int] = None, native_vlan : int = 1 @@ -102,14 +117,16 @@ class EndpointComposer: def has_vlan(self, vlan_id : int) -> bool: return vlan_id in self.get_vlan_ids() - def get_config_rules(self, service_vlan_id : int, delete : bool = False) -> List[Dict]: + def get_config_rules( + self, service_vlan_id : int, access_vlan_tagged : bool = False, delete : bool = False + ) -> List[Dict]: if self.objekt is None: MSG = 'Endpoint object not defined for uuid={:s}' 
LOGGER.warning(MSG.format(self.uuid)) return [] config_rules : List[Dict] = list() json_config_rule = json_config_rule_delete if delete else json_config_rule_set - if self.force_trunk or len(self.explicit_vlan_ids) > 0: + if self.force_trunk or access_vlan_tagged or len(self.explicit_vlan_ids) > 0: trunk_vlan_id = self._select_trunk_vlan_id(service_vlan_id) config_rules.append(json_config_rule(*_interface_switched_vlan( self.objekt.name, 'TRUNK', trunk_vlan_id=trunk_vlan_id @@ -161,7 +178,8 @@ class DeviceComposer: self.get_endpoint(endpoint_obj.name).configure(endpoint_obj, None) def get_config_rules( - self, network_instance_name : str, service_vlan_id : int, delete : bool = False + self, network_instance_name : str, service_vlan_id : int, + access_vlan_tagged : bool = False, delete : bool = False ) -> List[Dict]: SELECTED_DEVICES = { DeviceTypeEnum.PACKET_POP.value, @@ -176,7 +194,9 @@ class DeviceComposer: if network_instance_name != DEFAULT_NETWORK_INSTANCE: json_config_rule(*_network_instance(network_instance_name, 'L3VRF')) for endpoint in self.endpoints.values(): - config_rules.extend(endpoint.get_config_rules(service_vlan_id, delete=delete)) + config_rules.extend(endpoint.get_config_rules( + service_vlan_id, access_vlan_tagged=access_vlan_tagged, delete=delete + )) for vlan_id in sorted(self.vlan_ids): vlan_name = 'tfs-vlan-{:s}'.format(str(vlan_id)) config_rules.append(json_config_rule(*_network_instance_vlan( @@ -206,6 +226,7 @@ class ConfigRuleComposer: self.aliases : Dict[str, str] = dict() # device_name => device_uuid self.devices : Dict[str, DeviceComposer] = dict() # device_uuid => DeviceComposer self.vlan_id = None + self.access_vlan_tagged = False def set_device_alias(self, device_name : str, device_uuid : str) -> None: self.aliases[device_name] = device_uuid @@ -234,6 +255,18 @@ class ConfigRuleComposer: MSG = 'Invalid VLAN ID value in service settings: {:s}' raise Exception(MSG.format(str(json_settings))) + access_vlan_tagged = json_settings.get('access_vlan_tagged', json_settings.get('access-vlan-tagged')) + if access_vlan_tagged is None: + self.access_vlan_tagged = False + else: + parsed = _safe_bool(access_vlan_tagged) + if parsed is None: + MSG = 'Invalid access_vlan_tagged value in service settings: {:s}' + LOGGER.warning(MSG.format(str(access_vlan_tagged))) + self.access_vlan_tagged = False + else: + self.access_vlan_tagged = parsed + def get_config_rules( self, network_instance_name : str = NETWORK_INSTANCE, delete : bool = False ) -> Dict[str, List[Dict]]: @@ -241,7 +274,10 @@ class ConfigRuleComposer: raise Exception('VLAN ID must be configured at service level before composing rules') return { - device_uuid : device.get_config_rules(network_instance_name, self.vlan_id, delete=delete) + device_uuid : device.get_config_rules( + network_instance_name, self.vlan_id, + access_vlan_tagged=self.access_vlan_tagged, delete=delete + ) for device_uuid, device in self.devices.items() } @@ -252,4 +288,5 @@ class ConfigRuleComposer: for device_uuid, device in self.devices.items() }, 'vlan_id': self.vlan_id, + 'access_vlan_tagged': self.access_vlan_tagged, } -- GitLab From 63f109b4e591528927417291681dc21dae0046ee Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 10:00:09 +0000 Subject: [PATCH 23/79] Hackfest #1 - Mock OSM: - Added logging basic config --- hackfest/mock_osm/__main__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/hackfest/mock_osm/__main__.py b/hackfest/mock_osm/__main__.py index 8d4d10111..28227f47b 100644 --- 
a/hackfest/mock_osm/__main__.py +++ b/hackfest/mock_osm/__main__.py @@ -15,6 +15,7 @@ import cmd, logging from .MockOSM import MockOSM +logging.basicConfig(level=logging.DEBUG) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -- GitLab From 71ea0da4feba5316c2c50419859469ae838238a3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 10:23:15 +0000 Subject: [PATCH 24/79] End-to-end test - L2 VPN gNMI OpenConfig: - Removed VLAN 100 - Updated IETF L2 VPN to use Mock OSM - Upgraded ping_checks in CI/CD pipeline test --- src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml | 76 +-- .../clab/l2_vpn_gnmi_oc.clab.yml | 6 - .../data/ietf-l2vpn-service.json | 63 -- .../data/tfs-service-vlan-100-tagged.json | 21 - .../data/tfs-service-vlan-100-untagged.json | 21 - src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py | 28 +- src/tests/l2_vpn_gnmi_oc/tests/MockOSM.py | 62 ++ .../l2_vpn_gnmi_oc/tests/OSM_Constants.py | 53 ++ src/tests/l2_vpn_gnmi_oc/tests/Tools.py | 109 ---- .../tests/WimconnectorIETFL2VPN.py | 547 ++++++++++++++++++ .../l2_vpn_gnmi_oc/tests/acknowledgements.txt | 3 + src/tests/l2_vpn_gnmi_oc/tests/sdnconn.py | 242 ++++++++ .../tests/test_service_ietf_create.py | 38 +- .../tests/test_service_ietf_remove.py | 28 +- .../tests/test_service_tfs_create.py | 5 +- .../tests/test_service_tfs_remove.py | 5 +- 16 files changed, 1012 insertions(+), 295 deletions(-) delete mode 100644 src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json delete mode 100644 src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-tagged.json delete mode 100644 src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-untagged.json create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/MockOSM.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/OSM_Constants.py delete mode 100644 src/tests/l2_vpn_gnmi_oc/tests/Tools.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/acknowledgements.txt create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/sdnconn.py diff --git a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml index 2655cee03..fd996680e 100644 --- a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml +++ b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml @@ -183,6 +183,20 @@ end2end_test l2_vpn_gnmi_oc: # done - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + - | + ping_check() { + local SRC=$1 DST_IP=$2 PATTERN=$3 + local OUTPUT + OUTPUT=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=${SRC} --cmd "ping -n -c3 ${DST_IP}" --format json) + echo "$OUTPUT" + if echo "$OUTPUT" | grep -E "$PATTERN" >/dev/null; then + echo "PASSED ${SRC}->${DST_IP}" + else + echo "FAILED ${SRC}->${DST_IP}" + fi + echo "$OUTPUT" | grep -E "$PATTERN" + } + # Run end-to-end test: onboard scenario - > docker run -t --rm --name ${TEST_NAME} --network=host --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-onboarding.sh + # Dump configuration of the routers (after onboarding scenario) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + # Run end-to-end test: test no connectivity with
ping + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received, 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received, 100% packet loss" + # Run end-to-end test: configure service TFS - > docker run -t --rm --name ${TEST_NAME} --network=host @@ -203,24 +227,9 @@ end2end_test l2_vpn_gnmi_oc: - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Run end-to-end test: test connectivity with ping - - export TEST1_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) - - echo $TEST1_10 - - echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST1_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) - - echo $TEST1_1 - - echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) - - echo $TEST2_1 - - echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) - - echo $TEST2_10 - - echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST3_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) - - echo $TEST3_1 - - echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' - - export TEST3_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) - - echo $TEST3_10 - - echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received, 100% packet loss" # Run end-to-end test: deconfigure service TFS - > @@ -234,6 +243,11 @@ end2end_test l2_vpn_gnmi_oc: - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + # Run end-to-end test: test no connectivity with ping + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received, 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received, 100% packet loss" + # Run end-to-end test: configure service IETF - > docker run -t --rm --name ${TEST_NAME} --network=host @@ -247,24 +261,9 @@ end2end_test l2_vpn_gnmi_oc: - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Run end-to-end test: test connectivity with ping - - export TEST1_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) - - echo $TEST1_10 - - echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST1_1=$(containerlab exec --name 
${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) - - echo $TEST1_1 - - echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) - - echo $TEST2_1 - - echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) - - echo $TEST2_10 - - echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST3_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) - - echo $TEST3_1 - - echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' - - export TEST3_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) - - echo $TEST3_10 - - echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received, 100% packet loss" # Run end-to-end test: deconfigure service IETF - > @@ -278,6 +277,11 @@ end2end_test l2_vpn_gnmi_oc: - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + # Run end-to-end test: test no connectivity with ping + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received, 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received, 100% packet loss" + # Run end-to-end test: cleanup scenario - > docker run -t --rm --name ${TEST_NAME} --network=host diff --git a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml index 7374b92e0..44dfb9b1b 100644 --- a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml +++ b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml @@ -57,9 +57,6 @@ topology: mgmt-ipv4: 172.20.20.201 exec: - ip link set address 00:c1:ab:00:01:0a dev eth1 - - ip link add link eth1 name eth1.100 type vlan id 100 - - ip addr add 172.16.1.10/24 dev eth1.100 - - ip link set eth1.100 up - ip link add link eth1 name eth1.125 type vlan id 125 - ip addr add 172.16.2.10/24 dev eth1.125 - ip link set eth1.125 up @@ -69,9 +66,6 @@ topology: mgmt-ipv4: 172.20.20.202 exec: - ip link set address 00:c1:ab:00:01:14 dev eth1 - - ip link add link eth1 name eth1.100 type vlan id 100 - - ip addr add 172.16.1.20/24 dev eth1.100 - - ip link set eth1.100 up - ip link add link eth1 name eth1.125 type vlan id 125 - ip addr add 172.16.2.20/24 dev eth1.125 - ip link set eth1.125 up diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json deleted file mode 100644 index b649400eb..000000000 --- a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "ietf-l2vpn-svc:l2vpn-svc": { - "vpn-services": {"vpn-service": [{"vpn-id": "ietf-l2vpn-svc"}]}, - "sites": { - "site": [ - { - 
"site-id": "site_DC1", - "management": {"type": "ietf-l2vpn-svc:provider-managed"}, - "locations": {"location": [{"location-id": "DC1"}]}, - "devices": {"device": [{"device-id": "dc1", "location": "DC1"}]}, - "site-network-accesses": { - "site-network-access": [ - { - "site-network-access-id": "eth1", - "site-network-access-type": "ietf-l2vpn-svc:multipoint", - "device-reference": "dc1", - "vpn-attachment": {"vpn-id": "ietf-l2vpn-svc", "site-role": "ietf-l2vpn-svc:spoke-role"}, - "service": { - "svc-mtu": 1500, - "svc-input-bandwidth": 1000000000, - "svc-output-bandwidth": 1000000000, - "qos": {"qos-profile": {"classes": {"class": [{ - "class-id": "qos-realtime", - "direction": "ietf-l2vpn-svc:both", - "latency": {"latency-boundary": 10}, - "bandwidth": {"guaranteed-bw-percent": 100} - }]}}} - } - } - ] - } - }, - { - "site-id": "site_DC2", - "management": {"type": "ietf-l2vpn-svc:provider-managed"}, - "locations": {"location": [{"location-id": "DC2"}]}, - "devices": {"device": [{"device-id": "dc2", "location": "DC2"}]}, - "site-network-accesses": { - "site-network-access": [ - { - "site-network-access-id": "eth1", - "site-network-access-type": "ietf-l2vpn-svc:multipoint", - "device-reference": "dc2", - "vpn-attachment": {"vpn-id": "ietf-l2vpn-svc", "site-role": "ietf-l2vpn-svc:hub-role"}, - "service": { - "svc-mtu": 1500, - "svc-input-bandwidth": 1000000000, - "svc-output-bandwidth": 1000000000, - "qos": {"qos-profile": {"classes": {"class": [{ - "class-id": "qos-realtime", - "direction": "ietf-l2vpn-svc:both", - "latency": {"latency-boundary": 10}, - "bandwidth": {"guaranteed-bw-percent": 100} - }]}}} - } - } - ] - } - } - ] - } - } -} diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-tagged.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-tagged.json deleted file mode 100644 index 1af3153ce..000000000 --- a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-tagged.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "services": [ - { - "service_id": { - "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l2vpn-svc-100"} - }, - "service_type": "SERVICETYPE_L2NM", - "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, - "service_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}}, - {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}} - ], - "service_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": { - "resource_key": "/settings", - "resource_value": {"vlan_id": 100, "access_vlan_tagged": true} - }} - ]} - } - ] -} diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-untagged.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-untagged.json deleted file mode 100644 index 0de688f2c..000000000 --- a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-100-untagged.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "services": [ - { - "service_id": { - "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l2vpn-svc-100"} - }, - "service_type": "SERVICETYPE_L2NM", - "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, - "service_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}}, - {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}} - ], - "service_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": { - "resource_key": "/settings", - "resource_value": {"vlan_id": 100} - }} - ]} - } - ] -} diff --git 
a/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py b/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py index 5997e58c8..70b689ede 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py @@ -13,31 +13,39 @@ # limitations under the License. import pytest +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_http from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient -from monitoring.client.MonitoringClient import MonitoringClient from service.client.ServiceClient import ServiceClient +from tests.tools.mock_osm.MockOSM import MockOSM +from .OSM_Constants import WIM_MAPPING + +NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI) +NBI_PORT = get_service_port_http(ServiceNameEnum.NBI) +NBI_USERNAME = 'admin' +NBI_PASSWORD = 'admin' +NBI_BASE_URL = '' @pytest.fixture(scope='session') -def context_client(): - _client = ContextClient() - yield _client - _client.close() +def osm_wim() -> MockOSM: + wim_url = 'http://{:s}:{:d}'.format(NBI_ADDRESS, NBI_PORT) + return MockOSM(wim_url, WIM_MAPPING, NBI_USERNAME, NBI_PASSWORD) @pytest.fixture(scope='session') -def device_client(): - _client = DeviceClient() +def context_client() -> ContextClient: + _client = ContextClient() yield _client _client.close() @pytest.fixture(scope='session') -def monitoring_client(): - _client = MonitoringClient() +def device_client() -> DeviceClient: + _client = DeviceClient() yield _client _client.close() @pytest.fixture(scope='session') -def service_client(): +def service_client() -> ServiceClient: _client = ServiceClient() yield _client _client.close() diff --git a/src/tests/l2_vpn_gnmi_oc/tests/MockOSM.py b/src/tests/l2_vpn_gnmi_oc/tests/MockOSM.py new file mode 100644 index 000000000..2361b44b6 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/MockOSM.py @@ -0,0 +1,62 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
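+
+# Usage sketch (illustrative only): the osm_wim fixture in Fixtures.py builds this wrapper and the
+# tests drive it roughly as follows, with WIM_MAPPING / SERVICE_TYPE / SERVICE_CONNECTION_POINTS
+# taken from OSM_Constants.py; the NBI host/port placeholders below are resolved from the TFS deployment:
+#   osm_wim = MockOSM('http://<nbi-host>:<nbi-port>', WIM_MAPPING, 'admin', 'admin')
+#   service_uuid = osm_wim.create_connectivity_service(SERVICE_TYPE, SERVICE_CONNECTION_POINTS)
+#   osm_wim.get_connectivity_service_status(service_uuid)    # e.g. {'sdn_status': 'ACTIVE'}
+#   osm_wim.delete_connectivity_service(service_uuid)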
+ +import logging +from .WimconnectorIETFL2VPN import WimconnectorIETFL2VPN + +LOGGER = logging.getLogger(__name__) + +class MockOSM: + def __init__(self, url, mapping, username, password): + wim = {'wim_url': url} + wim_account = {'user': username, 'password': password} + config = {'mapping_not_needed': False, 'service_endpoint_mapping': mapping} + self.wim = WimconnectorIETFL2VPN(wim, wim_account, config=config) + self.conn_info = {} # internal database emulating OSM storage provided to WIM Connectors + + def create_connectivity_service(self, service_type, connection_points): + LOGGER.info('[create_connectivity_service] service_type={:s}'.format(str(service_type))) + LOGGER.info('[create_connectivity_service] connection_points={:s}'.format(str(connection_points))) + self.wim.check_credentials() + result = self.wim.create_connectivity_service(service_type, connection_points) + LOGGER.info('[create_connectivity_service] result={:s}'.format(str(result))) + service_uuid, conn_info = result + self.conn_info[service_uuid] = conn_info + return service_uuid + + def get_connectivity_service_status(self, service_uuid): + LOGGER.info('[get_connectivity_service] service_uuid={:s}'.format(str(service_uuid))) + conn_info = self.conn_info.get(service_uuid) + if conn_info is None: raise Exception('ServiceId({:s}) not found'.format(str(service_uuid))) + LOGGER.info('[get_connectivity_service] conn_info={:s}'.format(str(conn_info))) + self.wim.check_credentials() + result = self.wim.get_connectivity_service_status(service_uuid, conn_info=conn_info) + LOGGER.info('[get_connectivity_service] result={:s}'.format(str(result))) + return result + + def edit_connectivity_service(self, service_uuid, connection_points): + LOGGER.info('[edit_connectivity_service] service_uuid={:s}'.format(str(service_uuid))) + LOGGER.info('[edit_connectivity_service] connection_points={:s}'.format(str(connection_points))) + conn_info = self.conn_info.get(service_uuid) + if conn_info is None: raise Exception('ServiceId({:s}) not found'.format(str(service_uuid))) + LOGGER.info('[edit_connectivity_service] conn_info={:s}'.format(str(conn_info))) + self.wim.edit_connectivity_service(service_uuid, conn_info=conn_info, connection_points=connection_points) + + def delete_connectivity_service(self, service_uuid): + LOGGER.info('[delete_connectivity_service] service_uuid={:s}'.format(str(service_uuid))) + conn_info = self.conn_info.get(service_uuid) + if conn_info is None: raise Exception('ServiceId({:s}) not found'.format(str(service_uuid))) + LOGGER.info('[delete_connectivity_service] conn_info={:s}'.format(str(conn_info))) + self.wim.check_credentials() + self.wim.delete_connectivity_service(service_uuid, conn_info=conn_info) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/OSM_Constants.py b/src/tests/l2_vpn_gnmi_oc/tests/OSM_Constants.py new file mode 100644 index 000000000..13cb57bf2 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/OSM_Constants.py @@ -0,0 +1,53 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# Ref: https://osm.etsi.org/wikipub/index.php/WIM +WIM_MAPPING = [ + { + 'device-id' : 'dc1', # pop_switch_dpid + #'device_interface_id' : ??, # pop_switch_port + 'service_endpoint_id' : 'ep-1', # wan_service_endpoint_id + 'service_mapping_info': { # wan_service_mapping_info, other extra info + 'bearer': {'bearer-reference': 'r1:Ethernet10'}, + 'site-id': '1', + }, + #'switch_dpid' : ??, # wan_switch_dpid + #'switch_port' : ??, # wan_switch_port + #'datacenter_id' : ??, # vim_account + }, + { + 'device-id' : 'dc2', # pop_switch_dpid + #'device_interface_id' : ??, # pop_switch_port + 'service_endpoint_id' : 'ep-2', # wan_service_endpoint_id + 'service_mapping_info': { # wan_service_mapping_info, other extra info + 'bearer': {'bearer-reference': 'r3:Ethernet10'}, + 'site-id': '2', + }, + #'switch_dpid' : ??, # wan_switch_dpid + #'switch_port' : ??, # wan_switch_port + #'datacenter_id' : ??, # vim_account + }, +] + +SERVICE_TYPE = 'ELINE' + +SERVICE_CONNECTION_POINTS = [ + {'service_endpoint_id': 'ep-1', + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': 125}}, + {'service_endpoint_id': 'ep-2', + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': 125}}, +] diff --git a/src/tests/l2_vpn_gnmi_oc/tests/Tools.py b/src/tests/l2_vpn_gnmi_oc/tests/Tools.py deleted file mode 100644 index bbee845cd..000000000 --- a/src/tests/l2_vpn_gnmi_oc/tests/Tools.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import enum, logging, requests -from typing import Any, Dict, List, Optional, Set, Union -from common.Constants import ServiceNameEnum -from common.Settings import get_service_host, get_service_port_http - -NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI) -NBI_PORT = get_service_port_http(ServiceNameEnum.NBI) -NBI_USERNAME = 'admin' -NBI_PASSWORD = 'admin' -NBI_BASE_URL = '' - -class RestRequestMethod(enum.Enum): - GET = 'get' - POST = 'post' - PUT = 'put' - PATCH = 'patch' - DELETE = 'delete' - -EXPECTED_STATUS_CODES : Set[int] = { - requests.codes['OK' ], - requests.codes['CREATED' ], - requests.codes['ACCEPTED' ], - requests.codes['NO_CONTENT'], -} - -def do_rest_request( - method : RestRequestMethod, url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - request_url = 'http://{:s}:{:s}@{:s}:{:d}{:s}{:s}'.format( - NBI_USERNAME, NBI_PASSWORD, NBI_ADDRESS, NBI_PORT, str(NBI_BASE_URL), url - ) - - if logger is not None: - msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) - if body is not None: msg += ' body={:s}'.format(str(body)) - logger.warning(msg) - reply = requests.request(method.value, request_url, timeout=timeout, json=body, allow_redirects=allow_redirects) - if logger is not None: - logger.warning('Reply: {:s}'.format(str(reply.text))) - assert reply.status_code in expected_status_codes, 'Reply failed with status code {:d}'.format(reply.status_code) - - if reply.content and len(reply.content) > 0: return reply.json() - return None - -def do_rest_get_request( - url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - return do_rest_request( - RestRequestMethod.GET, url, body=body, timeout=timeout, allow_redirects=allow_redirects, - expected_status_codes=expected_status_codes, logger=logger - ) - -def do_rest_post_request( - url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - return do_rest_request( - RestRequestMethod.POST, url, body=body, timeout=timeout, allow_redirects=allow_redirects, - expected_status_codes=expected_status_codes, logger=logger - ) - -def do_rest_put_request( - url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - return do_rest_request( - RestRequestMethod.PUT, url, body=body, timeout=timeout, allow_redirects=allow_redirects, - expected_status_codes=expected_status_codes, logger=logger - ) - -def do_rest_patch_request( - url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - return do_rest_request( - RestRequestMethod.PATCH, url, body=body, timeout=timeout, allow_redirects=allow_redirects, - expected_status_codes=expected_status_codes, logger=logger - ) - -def do_rest_delete_request( - url : str, body : Optional[Any] = None, timeout : int = 10, - 
allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - return do_rest_request( - RestRequestMethod.DELETE, url, body=body, timeout=timeout, allow_redirects=allow_redirects, - expected_status_codes=expected_status_codes, logger=logger - ) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py b/src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py new file mode 100644 index 000000000..6a616eb75 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py @@ -0,0 +1,547 @@ +# -*- coding: utf-8 -*- +## +# Copyright 2018 Telefonica +# All Rights Reserved. +# +# Contributors: Oscar Gonzalez de Dios, Manuel Lopez Bravo, Guillermo Pajares Martin +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This work has been performed in the context of the Metro-Haul project - +# funded by the European Commission under Grant number 761727 through the +# Horizon 2020 program. +## +"""The SDN/WIM connector is responsible for establishing wide area network +connectivity. + +This SDN/WIM connector implements the standard IETF RFC 8466 "A YANG Data + Model for Layer 2 Virtual Private Network (L2VPN) Service Delivery" + +It receives the endpoints and the necessary details to request +the Layer 2 service. 
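+
+The RESTCONF resources touched by this connector (illustrative summary; the
+host/port in wim_url depend on the deployed NBI) are:
+
+    POST   {wim_url}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services
+    POST   {wim_url}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={site-id}/site-network-accesses/
+    GET    {wim_url}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={vpn-id}/
+    DELETE {wim_url}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={vpn-id}/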
+""" +import requests +import uuid +import logging +import copy +#from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError +from .sdnconn import SdnConnectorBase, SdnConnectorError + +"""Check layer where we move it""" + + +class WimconnectorIETFL2VPN(SdnConnectorBase): + def __init__(self, wim, wim_account, config=None, logger=None): + """IETF L2VPN WIM connector + + Arguments: (To be completed) + wim (dict): WIM record, as stored in the database + wim_account (dict): WIM account record, as stored in the database + """ + self.logger = logging.getLogger("ro.sdn.ietfl2vpn") + super().__init__(wim, wim_account, config, logger) + self.headers = {"Content-Type": "application/json"} + self.mappings = { + m["service_endpoint_id"]: m for m in self.service_endpoint_mapping + } + self.user = wim_account.get("user") + self.passwd = wim_account.get("password") # replace "passwordd" -> "password" + + if self.user and self.passwd is not None: + self.auth = (self.user, self.passwd) + else: + self.auth = None + + self.logger.info("IETFL2VPN Connector Initialized.") + + def check_credentials(self): + endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( + self.wim["wim_url"] + ) + + #try: + # response = requests.get(endpoint, auth=self.auth) + # http_code = response.status_code + #except requests.exceptions.RequestException as e: + # raise SdnConnectorError(e.response, http_code=503) + + #if http_code != 200: + # raise SdnConnectorError("Failed while authenticating", http_code=http_code) + + self.logger.info("Credentials checked") + + def get_connectivity_service_status(self, service_uuid, conn_info=None): + """Monitor the status of the connectivity service stablished + + Arguments: + service_uuid: Connectivity service unique identifier + + Returns: + Examples:: + {'sdn_status': 'ACTIVE'} + {'sdn_status': 'INACTIVE'} + {'sdn_status': 'DOWN'} + {'sdn_status': 'ERROR'} + """ + try: + self.logger.info("Sending get connectivity service stuatus") + servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format( + self.wim["wim_url"], service_uuid + ) + response = requests.get(servicepoint, auth=self.auth) + self.logger.warning('response.status_code={:s}'.format(str(response.status_code))) + if response.status_code != requests.codes.ok: + raise SdnConnectorError( + "Unable to obtain connectivity servcice status", + http_code=response.status_code, + ) + + service_status = {"sdn_status": "ACTIVE"} + + return service_status + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request Timeout", http_code=408) + + def search_mapp(self, connection_point): + id = connection_point["service_endpoint_id"] + if id not in self.mappings: + raise SdnConnectorError("Endpoint {} not located".format(str(id))) + else: + return self.mappings[id] + + def create_connectivity_service(self, service_type, connection_points, **kwargs): + """Stablish WAN connectivity between the endpoints + + Arguments: + service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), + ``L3``. + connection_points (list): each point corresponds to + an entry point from the DC to the transport network. One + connection point serves to identify the specific access and + some other service parameters, such as encapsulation type. + Represented by a dict as follows:: + + { + "service_endpoint_id": ..., (str[uuid]) + "service_endpoint_encapsulation_type": ..., + (enum: none, dot1q, ...) + "service_endpoint_encapsulation_info": { + ... 
(dict) + "vlan": ..., (int, present if encapsulation is dot1q) + "vni": ... (int, present if encapsulation is vxlan), + "peers": [(ipv4_1), (ipv4_2)] + (present if encapsulation is vxlan) + } + } + + The service endpoint ID should be previously informed to the WIM + engine in the RO when the WIM port mapping is registered. + + Keyword Arguments: + bandwidth (int): value in kilobytes + latency (int): value in milliseconds + + Other QoS might be passed as keyword arguments. + + Returns: + tuple: ``(service_id, conn_info)`` containing: + - *service_uuid* (str): UUID of the established connectivity + service + - *conn_info* (dict or None): Information to be stored at the + database (or ``None``). This information will be provided to + the :meth:`~.edit_connectivity_service` and :obj:`~.delete`. + **MUST** be JSON/YAML-serializable (plain data structures). + + Raises: + SdnConnectorException: In case of error. + """ + SETTINGS = { # min_endpoints, max_endpoints, vpn_service_type + 'ELINE': (2, 2, 'vpws'), # Virtual Private Wire Service + 'ELAN' : (2, None, 'vpls'), # Virtual Private LAN Service + } + settings = SETTINGS.get(service_type) + if settings is None: raise NotImplementedError('Unsupported service_type({:s})'.format(str(service_type))) + min_endpoints, max_endpoints, vpn_service_type = settings + + if max_endpoints is not None and len(connection_points) > max_endpoints: + msg = "Connections between more than {:d} endpoints are not supported for service_type {:s}" + raise SdnConnectorError(msg.format(max_endpoints, service_type)) + + if min_endpoints is not None and len(connection_points) < min_endpoints: + msg = "Connections must be of at least {:d} endpoints for service_type {:s}" + raise SdnConnectorError(msg.format(min_endpoints, service_type)) + + """First step, create the vpn service""" + uuid_l2vpn = str(uuid.uuid4()) + vpn_service = {} + vpn_service["vpn-id"] = uuid_l2vpn + vpn_service["vpn-svc-type"] = vpn_service_type + vpn_service["svc-topo"] = "any-to-any" + vpn_service["customer-name"] = "osm" + vpn_service_list = [] + vpn_service_list.append(vpn_service) + vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list} + response_service_creation = None + conn_info = [] + self.logger.info("Sending vpn-service : {:s}".format(str(vpn_service_l))) + + try: + endpoint_service_creation = ( + "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( + self.wim["wim_url"] + ) + ) + response_service_creation = requests.post( + endpoint_service_creation, + headers=self.headers, + json=vpn_service_l, + auth=self.auth, + ) + except requests.exceptions.ConnectionError: + #raise SdnConnectorError( + # "Request to create service Timeout", http_code=408 + #) + pass + + #if response_service_creation.status_code == 409: + # raise SdnConnectorError( + # "Service already exists", + # http_code=response_service_creation.status_code, + # ) + #elif response_service_creation.status_code != requests.codes.created: + # raise SdnConnectorError( + # "Request to create service not accepted", + # http_code=response_service_creation.status_code, + # ) + + self.logger.info('connection_points = {:s}'.format(str(connection_points))) + + # Check if protected paths are requested + extended_connection_points = [] + for connection_point in connection_points: + extended_connection_points.append(connection_point) + + connection_point_wan_info = self.search_mapp(connection_point) + service_mapping_info = connection_point_wan_info.get('service_mapping_info', {}) + redundant_service_endpoint_ids = 
service_mapping_info.get('redundant') + + if redundant_service_endpoint_ids is None: continue + if len(redundant_service_endpoint_ids) == 0: continue + + for redundant_service_endpoint_id in redundant_service_endpoint_ids: + redundant_connection_point = copy.deepcopy(connection_point) + redundant_connection_point['service_endpoint_id'] = redundant_service_endpoint_id + extended_connection_points.append(redundant_connection_point) + + self.logger.info('extended_connection_points = {:s}'.format(str(extended_connection_points))) + + """Second step, create the connections and vpn attachments""" + for connection_point in extended_connection_points: + connection_point_wan_info = self.search_mapp(connection_point) + site_network_access = {} + connection = {} + + if connection_point["service_endpoint_encapsulation_type"] != "none": + if ( + connection_point["service_endpoint_encapsulation_type"] + == "dot1q" + ): + """The connection is a VLAN""" + connection["encapsulation-type"] = "dot1q-vlan-tagged" + tagged = {} + tagged_interf = {} + service_endpoint_encapsulation_info = connection_point[ + "service_endpoint_encapsulation_info" + ] + + if service_endpoint_encapsulation_info["vlan"] is None: + raise SdnConnectorError("VLAN must be provided") + + tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[ + "vlan" + ] + tagged["dot1q-vlan-tagged"] = tagged_interf + connection["tagged-interface"] = tagged + else: + raise NotImplementedError("Encapsulation type not implemented") + + site_network_access["connection"] = connection + self.logger.info("Sending connection:{}".format(connection)) + vpn_attach = {} + vpn_attach["vpn-id"] = uuid_l2vpn + vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role" + site_network_access["vpn-attachment"] = vpn_attach + self.logger.info("Sending vpn-attachement :{}".format(vpn_attach)) + uuid_sna = str(uuid.uuid4()) + site_network_access["network-access-id"] = uuid_sna + site_network_access["bearer"] = connection_point_wan_info[ + "service_mapping_info" + ]["bearer"] + + access_priority = connection_point_wan_info["service_mapping_info"].get("priority") + if access_priority is not None: + availability = {} + availability["access-priority"] = access_priority + availability["single-active"] = [None] + site_network_access["availability"] = availability + + constraint = {} + constraint['constraint-type'] = 'end-to-end-diverse' + constraint['target'] = {'all-other-accesses': [None]} + + access_diversity = {} + access_diversity['constraints'] = {'constraint': []} + access_diversity['constraints']['constraint'].append(constraint) + site_network_access["access-diversity"] = access_diversity + + site_network_accesses = {} + site_network_access_list = [] + site_network_access_list.append(site_network_access) + site_network_accesses[ + "ietf-l2vpn-svc:site-network-access" + ] = site_network_access_list + conn_info_d = {} + conn_info_d["site"] = connection_point_wan_info["service_mapping_info"][ + "site-id" + ] + conn_info_d["site-network-access-id"] = site_network_access[ + "network-access-id" + ] + conn_info_d["mapping"] = None + conn_info.append(conn_info_d) + + self.logger.info("Sending site_network_accesses : {:s}".format(str(site_network_accesses))) + + try: + endpoint_site_network_access_creation = ( + "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/" + "sites/site={}/site-network-accesses/".format( + self.wim["wim_url"], + connection_point_wan_info["service_mapping_info"][ + "site-id" + ], + ) + ) + response_endpoint_site_network_access_creation = requests.post( 
+ endpoint_site_network_access_creation, + headers=self.headers, + json=site_network_accesses, + auth=self.auth, + ) + + if ( + response_endpoint_site_network_access_creation.status_code + == 409 + ): + self.delete_connectivity_service(vpn_service["vpn-id"]) + + raise SdnConnectorError( + "Site_Network_Access with ID '{}' already exists".format( + site_network_access["network-access-id"] + ), + http_code=response_endpoint_site_network_access_creation.status_code, + ) + elif ( + response_endpoint_site_network_access_creation.status_code + == 400 + ): + self.delete_connectivity_service(vpn_service["vpn-id"]) + + raise SdnConnectorError( + "Site {} does not exist".format( + connection_point_wan_info["service_mapping_info"][ + "site-id" + ] + ), + http_code=response_endpoint_site_network_access_creation.status_code, + ) + elif ( + response_endpoint_site_network_access_creation.status_code + != requests.codes.created + and response_endpoint_site_network_access_creation.status_code + != requests.codes.no_content + ): + self.delete_connectivity_service(vpn_service["vpn-id"]) + + raise SdnConnectorError( + "Request not accepted", + http_code=response_endpoint_site_network_access_creation.status_code, + ) + except requests.exceptions.ConnectionError: + #self.delete_connectivity_service(vpn_service["vpn-id"]) + + #raise SdnConnectorError("Request Timeout", http_code=408) + pass + + return uuid_l2vpn, conn_info + + def delete_connectivity_service(self, service_uuid, conn_info=None): + """Disconnect multi-site endpoints previously connected + + This method should receive as the first argument the UUID generated by + the ``create_connectivity_service`` + """ + try: + self.logger.info("Sending delete") + servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format( + self.wim["wim_url"], service_uuid + ) + response = requests.delete(servicepoint, auth=self.auth) + + if response.status_code != requests.codes.no_content: + raise SdnConnectorError( + "Error in the request", http_code=response.status_code + ) + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request Timeout", http_code=408) + + def edit_connectivity_service( + self, service_uuid, conn_info=None, connection_points=None, **kwargs + ): + """Change an existing connectivity service, see + ``create_connectivity_service``""" + # sites = {"sites": {}} + # site_list = [] + vpn_service = {} + vpn_service["svc-topo"] = "any-to-any" + counter = 0 + + for connection_point in connection_points: + site_network_access = {} + connection_point_wan_info = self.search_mapp(connection_point) + params_site = {} + params_site["site-id"] = connection_point_wan_info["service_mapping_info"][ + "site-id" + ] + params_site["site-vpn-flavor"] = "site-vpn-flavor-single" + device_site = {} + device_site["device-id"] = connection_point_wan_info["device-id"] + params_site["devices"] = device_site + # network_access = {} + connection = {} + + if connection_point["service_endpoint_encapsulation_type"] != "none": + if connection_point["service_endpoint_encapsulation_type"] == "dot1q": + """The connection is a VLAN""" + connection["encapsulation-type"] = "dot1q-vlan-tagged" + tagged = {} + tagged_interf = {} + service_endpoint_encapsulation_info = connection_point[ + "service_endpoint_encapsulation_info" + ] + + if service_endpoint_encapsulation_info["vlan"] is None: + raise SdnConnectorError("VLAN must be provided") + + tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[ + "vlan" + ] + 
tagged["dot1q-vlan-tagged"] = tagged_interf + connection["tagged-interface"] = tagged + else: + raise NotImplementedError("Encapsulation type not implemented") + + site_network_access["connection"] = connection + vpn_attach = {} + vpn_attach["vpn-id"] = service_uuid + vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role" + site_network_access["vpn-attachment"] = vpn_attach + uuid_sna = conn_info[counter]["site-network-access-id"] + site_network_access["network-access-id"] = uuid_sna + site_network_access["bearer"] = connection_point_wan_info[ + "service_mapping_info" + ]["bearer"] + site_network_accesses = {} + site_network_access_list = [] + site_network_access_list.append(site_network_access) + site_network_accesses[ + "ietf-l2vpn-svc:site-network-access" + ] = site_network_access_list + + try: + endpoint_site_network_access_edit = ( + "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/" + "sites/site={}/site-network-accesses/".format( + self.wim["wim_url"], + connection_point_wan_info["service_mapping_info"]["site-id"], + ) + ) + response_endpoint_site_network_access_creation = requests.put( + endpoint_site_network_access_edit, + headers=self.headers, + json=site_network_accesses, + auth=self.auth, + ) + + if response_endpoint_site_network_access_creation.status_code == 400: + raise SdnConnectorError( + "Service does not exist", + http_code=response_endpoint_site_network_access_creation.status_code, + ) + elif ( + response_endpoint_site_network_access_creation.status_code != 201 + and response_endpoint_site_network_access_creation.status_code + != 204 + ): + raise SdnConnectorError( + "Request no accepted", + http_code=response_endpoint_site_network_access_creation.status_code, + ) + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request Timeout", http_code=408) + + counter += 1 + + return None + + def clear_all_connectivity_services(self): + """Delete all WAN Links corresponding to a WIM""" + try: + self.logger.info("Sending clear all connectivity services") + servicepoint = ( + "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( + self.wim["wim_url"] + ) + ) + response = requests.delete(servicepoint, auth=self.auth) + + if response.status_code != requests.codes.no_content: + raise SdnConnectorError( + "Unable to clear all connectivity services", + http_code=response.status_code, + ) + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request Timeout", http_code=408) + + def get_all_active_connectivity_services(self): + """Provide information about all active connections provisioned by a + WIM + """ + try: + self.logger.info("Sending get all connectivity services") + servicepoint = ( + "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( + self.wim["wim_url"] + ) + ) + response = requests.get(servicepoint, auth=self.auth) + + if response.status_code != requests.codes.ok: + raise SdnConnectorError( + "Unable to get all connectivity services", + http_code=response.status_code, + ) + + return response + except requests.exceptions.ConnectionError: + raise SdnConnectorError("Request Timeout", http_code=408) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/acknowledgements.txt b/src/tests/l2_vpn_gnmi_oc/tests/acknowledgements.txt new file mode 100644 index 000000000..b7ce926dd --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/acknowledgements.txt @@ -0,0 +1,3 @@ +MockOSM is based on source code taken from: +https://osm.etsi.org/gitlab/osm/ro/-/blob/master/RO-plugin/osm_ro_plugin/sdnconn.py 
+https://osm.etsi.org/gitlab/osm/ro/-/blob/master/RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn/wimconn_ietfl2vpn.py diff --git a/src/tests/l2_vpn_gnmi_oc/tests/sdnconn.py b/src/tests/l2_vpn_gnmi_oc/tests/sdnconn.py new file mode 100644 index 000000000..a1849c9ef --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/sdnconn.py @@ -0,0 +1,242 @@ +# -*- coding: utf-8 -*- +## +# Copyright 2018 University of Bristol - High Performance Networks Research +# Group +# All Rights Reserved. +# +# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique +# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: +# +# Neither the name of the University of Bristol nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# This work has been performed in the context of DCMS UK 5G Testbeds +# & Trials Programme and in the framework of the Metro-Haul project - +# funded by the European Commission under Grant number 761727 through the +# Horizon 2020 and 5G-PPP programmes. +## +"""The SDN connector is responsible for establishing both wide area network connectivity (WIM) +and intranet SDN connectivity. + +It receives information from ports to be connected . +""" + +import logging +from http import HTTPStatus + + +class SdnConnectorError(Exception): + """Base Exception for all connector related errors + provide the parameter 'http_code' (int) with the error code: + Bad_Request = 400 + Unauthorized = 401 (e.g. credentials are not valid) + Not_Found = 404 (e.g. try to edit or delete a non existing connectivity service) + Forbidden = 403 + Method_Not_Allowed = 405 + Not_Acceptable = 406 + Request_Timeout = 408 (e.g timeout reaching server, or cannot reach the server) + Conflict = 409 + Service_Unavailable = 503 + Internal_Server_Error = 500 + """ + + def __init__(self, message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value): + Exception.__init__(self, message) + self.http_code = http_code + + +class SdnConnectorBase(object): + """Abstract base class for all the SDN connectors + + Arguments: + wim (dict): WIM record, as stored in the database + wim_account (dict): WIM account record, as stored in the database + config + The arguments of the constructor are converted to object attributes. + An extra property, ``service_endpoint_mapping`` is created from ``config``. + """ + + def __init__(self, wim, wim_account, config=None, logger=None): + """ + :param wim: (dict). Contains among others 'wim_url' + :param wim_account: (dict). Contains among others 'uuid' (internal id), 'name', + 'sdn' (True if is intended for SDN-assist or False if intended for WIM), 'user', 'password'. + :param config: (dict or None): Particular information of plugin. 
These keys if present have a common meaning: + 'mapping_not_needed': (bool) False by default or if missing, indicates that mapping is not needed. + 'service_endpoint_mapping': (list) provides the internal endpoint mapping. The meaning is: + KEY meaning for WIM meaning for SDN assist + -------- -------- -------- + device_id pop_switch_dpid compute_id + device_interface_id pop_switch_port compute_pci_address + service_endpoint_id wan_service_endpoint_id SDN_service_endpoint_id + service_mapping_info wan_service_mapping_info SDN_service_mapping_info + contains extra information if needed. Text in Yaml format + switch_dpid wan_switch_dpid SDN_switch_dpid + switch_port wan_switch_port SDN_switch_port + datacenter_id vim_account vim_account + id: (internal, do not use) + wim_id: (internal, do not use) + :param logger (logging.Logger): optional logger object. If none is passed 'openmano.sdn.sdnconn' is used. + """ + self.logger = logger or logging.getLogger("ro.sdn") + self.wim = wim + self.wim_account = wim_account + self.config = config or {} + self.service_endpoint_mapping = self.config.get("service_endpoint_mapping", []) + + def check_credentials(self): + """Check if the connector itself can access the SDN/WIM with the provided url (wim.wim_url), + user (wim_account.user), and password (wim_account.password) + + Raises: + SdnConnectorError: Issues regarding authorization, access to + external URLs, etc are detected. + """ + raise NotImplementedError + + def get_connectivity_service_status(self, service_uuid, conn_info=None): + """Monitor the status of the connectivity service established + + Arguments: + service_uuid (str): UUID of the connectivity service + conn_info (dict or None): Information returned by the connector + during the service creation/edition and subsequently stored in + the database. + + Returns: + dict: JSON/YAML-serializable dict that contains a mandatory key + ``sdn_status`` associated with one of the following values:: + + {'sdn_status': 'ACTIVE'} + # The service is up and running. + + {'sdn_status': 'INACTIVE'} + # The service was created, but the connector + # cannot determine yet if connectivity exists + # (ideally, the caller needs to wait and check again). + + {'sdn_status': 'DOWN'} + # Connection was previously established, + # but an error/failure was detected. + + {'sdn_status': 'ERROR'} + # An error occurred when trying to create the service/ + # establish the connectivity. + + {'sdn_status': 'BUILD'} + # Still trying to create the service, the caller + # needs to wait and check again. + + Additionally ``error_msg``(**str**) and ``sdn_info``(**dict**) + keys can be used to provide additional status explanation or + new information available for the connectivity service. + """ + raise NotImplementedError + + def create_connectivity_service(self, service_type, connection_points, **kwargs): + """ + Establish SDN/WAN connectivity between the endpoints + :param service_type: (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), ``L3``. + :param connection_points: (list): each point corresponds to + an entry point to be connected. For WIM: from the DC to the transport network. + For SDN: Compute/PCI to the transport network. One + connection point serves to identify the specific access and + some other service parameters, such as encapsulation type. 
+ Each item of the list is a dict with: + "service_endpoint_id": (str)(uuid) Same meaning that for 'service_endpoint_mapping' (see __init__) + In case the config attribute mapping_not_needed is True, this value is not relevant. In this case + it will contain the string "device_id:device_interface_id" + "service_endpoint_encapsulation_type": None, "dot1q", ... + "service_endpoint_encapsulation_info": (dict) with: + "vlan": ..., (int, present if encapsulation is dot1q) + "vni": ... (int, present if encapsulation is vxlan), + "peers": [(ipv4_1), (ipv4_2)] (present if encapsulation is vxlan) + "mac": ... + "device_id": ..., same meaning that for 'service_endpoint_mapping' (see __init__) + "device_interface_id": same meaning that for 'service_endpoint_mapping' (see __init__) + "switch_dpid": ..., present if mapping has been found for this device_id,device_interface_id + "swith_port": ... present if mapping has been found for this device_id,device_interface_id + "service_mapping_info": present if mapping has been found for this device_id,device_interface_id + :param kwargs: For future versions: + bandwidth (int): value in kilobytes + latency (int): value in milliseconds + Other QoS might be passed as keyword arguments. + :return: tuple: ``(service_id, conn_info)`` containing: + - *service_uuid* (str): UUID of the established connectivity service + - *conn_info* (dict or None): Information to be stored at the database (or ``None``). + This information will be provided to the :meth:`~.edit_connectivity_service` and :obj:`~.delete`. + **MUST** be JSON/YAML-serializable (plain data structures). + :raises: SdnConnectorException: In case of error. Nothing should be created in this case. + Provide the parameter http_code + """ + raise NotImplementedError + + def delete_connectivity_service(self, service_uuid, conn_info=None): + """ + Disconnect multi-site endpoints previously connected + + :param service_uuid: The one returned by create_connectivity_service + :param conn_info: The one returned by last call to 'create_connectivity_service' or 'edit_connectivity_service' + if they do not return None + :return: None + :raises: SdnConnectorException: In case of error. The parameter http_code must be filled + """ + raise NotImplementedError + + def edit_connectivity_service( + self, service_uuid, conn_info=None, connection_points=None, **kwargs + ): + """Change an existing connectivity service. + + This method's arguments and return value follow the same convention as + :meth:`~.create_connectivity_service`. + + :param service_uuid: UUID of the connectivity service. + :param conn_info: (dict or None): Information previously returned by last call to create_connectivity_service + or edit_connectivity_service + :param connection_points: (list): If provided, the old list of connection points will be replaced. + :param kwargs: Same meaning that create_connectivity_service + :return: dict or None: Information to be updated and stored at the database. + When ``None`` is returned, no information should be changed. + When an empty dict is returned, the database record will be deleted. + **MUST** be JSON/YAML-serializable (plain data structures). + Raises: + SdnConnectorException: In case of error. + """ + + def clear_all_connectivity_services(self): + """Delete all WAN Links in a WIM. + + This method is intended for debugging only, and should delete all the + connections controlled by the WIM/SDN, not only the connections that + a specific RO is aware of. + + Raises: + SdnConnectorException: In case of error. 
+ """ + raise NotImplementedError + + def get_all_active_connectivity_services(self): + """Provide information about all active connections provisioned by a + WIM. + + Raises: + SdnConnectorException: In case of error. + """ + raise NotImplementedError diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py index 92f52c688..ed02f7c62 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py @@ -12,44 +12,50 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, logging, os -from typing import Dict +import logging from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient -from .Fixtures import context_client # pylint: disable=unused-import -from .Tools import do_rest_get_request, do_rest_post_request +from .Fixtures import ( # pylint: disable=unused-import + # be careful, order of symbols is important here! + osm_wim, context_client +) +from .MockOSM import MockOSM +from .OSM_Constants import SERVICE_CONNECTION_POINTS, SERVICE_TYPE +logging.getLogger('ro.sdn.ietfl2vpn').setLevel(logging.DEBUG) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l2vpn-service.json') ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) # pylint: disable=redefined-outer-name, unused-argument def test_service_ietf_creation( - context_client : ContextClient, + osm_wim : MockOSM, context_client : ContextClient ): - # Issue service creation request - with open(REQUEST_FILE, 'r', encoding='UTF-8') as f: - svc1_data = json.load(f) - URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services' - do_rest_post_request(URL, body=svc1_data, logger=LOGGER, expected_status_codes={201}) - vpn_id = svc1_data['ietf-l2vpn-svc:l2vpn-svc']['vpn-services']['vpn-service'][0]['vpn-id'] + osm_wim.create_connectivity_service(SERVICE_TYPE, SERVICE_CONNECTION_POINTS) + service_uuid = list(osm_wim.conn_info.keys())[0] # this test adds a single service - URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={:s}/'.format(vpn_id) - service_data = do_rest_get_request(URL, logger=LOGGER, expected_status_codes={200}) - service_uuid = service_data['service-id'] + result = osm_wim.get_connectivity_service_status(service_uuid) + assert 'sdn_status' in result + assert result['sdn_status'] == 'ACTIVE' - # Verify service was created + # Verify the scenario has 1 service and 0 slices response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 1 assert len(response.slice_ids) == 0 + # Check there are no slices + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.warning('Slices[{:d}] = {:s}'.format( + len(response.slices), grpc_message_to_json_string(response) + )) + assert len(response.slices) == 0 + # Check there is 1 service response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.warning('Services[{:d}] = {:s}'.format( diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py index f08dae2af..993a880e9 100644 
--- a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py @@ -12,27 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, os -from typing import Dict, Set, Tuple +import logging +from typing import Set from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient -from .Fixtures import context_client # pylint: disable=unused-import -from .Tools import do_rest_delete_request +from .Fixtures import ( # pylint: disable=unused-import + # be careful, order of symbols is important here! + osm_wim, context_client +) +from .MockOSM import MockOSM +logging.getLogger('ro.sdn.ietfl2vpn').setLevel(logging.DEBUG) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l2vpn-service.json') ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) # pylint: disable=redefined-outer-name, unused-argument def test_service_ietf_removal( - context_client : ContextClient, # pylint: disable=redefined-outer-name + osm_wim : MockOSM, context_client : ContextClient ): # Verify the scenario has 1 service and 0 slices response = context_client.GetContext(ADMIN_CONTEXT_ID) @@ -41,12 +44,16 @@ def test_service_ietf_removal( # Check there are no slices response = context_client.ListSlices(ADMIN_CONTEXT_ID) - LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + LOGGER.warning('Slices[{:d}] = {:s}'.format( + len(response.slices), grpc_message_to_json_string(response) + )) assert len(response.slices) == 0 # Check there is 1 service response = context_client.ListServices(ADMIN_CONTEXT_ID) - LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + LOGGER.warning('Services[{:d}] = {:s}'.format( + len(response.services), grpc_message_to_json_string(response) + )) assert len(response.services) == 1 service_uuids : Set[str] = set() @@ -66,10 +73,9 @@ def test_service_ietf_removal( # Identify service to delete assert len(service_uuids) == 1 - service_uuid = set(service_uuids).pop() + service_uuid = service_uuids.pop() - URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={:s}/'.format(service_uuid) - do_rest_delete_request(URL, logger=LOGGER, expected_status_codes={204}) + osm_wim.delete_connectivity_service(service_uuid) # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py index 87c0f7909..3e7abd640 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py @@ -27,7 +27,10 @@ from .Fixtures import context_client, device_client, service_client # pyl LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-service.json') +DESCRIPTOR_FILE = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + '..', 'data', 
'tfs-service-vlan-125-tagged.json' +) ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py index 15236da94..137848f53 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py @@ -27,7 +27,10 @@ from .Fixtures import context_client, service_client # pylint: disable=un LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-service.json') +DESCRIPTOR_FILE = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + '..', 'data', 'tfs-service-vlan-125-tagged.json' +) ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) -- GitLab From 4fa32a0760e15346324ad60b2f704ac3137da07d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 10:24:07 +0000 Subject: [PATCH 25/79] NBI component - IETF L2 VPN: - Implemented default bearer generation - Implemented addition of VLAN-Id as service setting --- src/nbi/service/ietf_l2vpn/Handlers.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/Handlers.py b/src/nbi/service/ietf_l2vpn/Handlers.py index 775c0aab0..2edcffdd0 100644 --- a/src/nbi/service/ietf_l2vpn/Handlers.py +++ b/src/nbi/service/ietf_l2vpn/Handlers.py @@ -127,8 +127,15 @@ def process_site_network_access( mapping = BEARER_MAPPINGS.get(bearer_reference) if mapping is None: - msg = 'Specified Bearer({:s}) is not configured.' - raise Exception(msg.format(str(bearer_reference))) + if ':' not in bearer_reference: + MSG = 'Bearer({:s}) not found; unable to auto-generated mapping' + raise Exception(MSG.format(str(bearer_reference))) + mapping = str(bearer_reference).split(':', maxsplit=1) + mapping.extend([None, None, None, None, None, None, None]) + mapping = tuple(mapping) + MSG = 'Bearer({:s}) not found; auto-generated mapping: {:s}' + LOGGER.warning(MSG.format(str(bearer_reference), str(mapping))) + ( device_uuid, endpoint_uuid, router_id, route_dist, sub_if_index, address_ip, address_prefix, remote_router, circuit_id @@ -172,12 +179,14 @@ def process_site_network_access( service_settings_key = '/settings' if service_mtu is None: service_mtu = DEFAULT_MTU - update_config_rule_custom(config_rules, service_settings_key, { + field_updates = { 'mtu' : (service_mtu, True), #'address_families': (DEFAULT_ADDRESS_FAMILIES, True), #'bgp_as' : (DEFAULT_BGP_AS, True), #'bgp_route_target': (DEFAULT_BGP_ROUTE_TARGET, True), - }) + } + if cvlan_tag_id is not None: field_updates['vlan_id' ] = (cvlan_tag_id, True) + update_config_rule_custom(config_rules, service_settings_key, field_updates) #ENDPOINT_SETTINGS_KEY = '/device[{:s}]/endpoint[{:s}]/vlan[{:d}]/settings' #endpoint_settings_key = ENDPOINT_SETTINGS_KEY.format(device_uuid, endpoint_uuid, cvlan_tag_id) -- GitLab From 9b6ec2a22abdbea3dafc735a2f35a6c156e67a5d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 10:46:47 +0000 Subject: [PATCH 26/79] End-to-end test - L2 VPN gNMI OpenConfig: - Added l2_vpn_gnmi_oc integration test to CI/CD pipeline --- src/tests/.gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 723c26d6f..ceeb30767 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -27,6 +27,7 @@ include: #- local: 
'/src/tests/ryu-openflow/.gitlab-ci.yml' - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' - local: '/src/tests/acl_end2end/.gitlab-ci.yml' + - local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml' - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' -- GitLab From e2476cca8368400d374dfdbfc0efd6a5a5a43bec Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 14:17:40 +0000 Subject: [PATCH 27/79] CI/CD pipeline: - Disabled all integration tests but L2 VPN gNMI OpenConfig --- .gitlab-ci.yml | 76 ++++++++++++++++++++-------------------- src/tests/.gitlab-ci.yml | 34 +++++++++--------- 2 files changed, 55 insertions(+), 55 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e11c8474a..e3286d05f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,44 +22,44 @@ stages: # include the individual .gitlab-ci.yml of each micro-service and tests include: - #- local: '/manifests/.gitlab-ci.yml' - - local: '/src/monitoring/.gitlab-ci.yml' - - local: '/src/nbi/.gitlab-ci.yml' - - local: '/src/context/.gitlab-ci.yml' - - local: '/src/device/.gitlab-ci.yml' - - local: '/src/service/.gitlab-ci.yml' - - local: '/src/qkd_app/.gitlab-ci.yml' - - local: '/src/dbscanserving/.gitlab-ci.yml' - - local: '/src/opticalattackmitigator/.gitlab-ci.yml' - - local: '/src/opticalattackdetector/.gitlab-ci.yml' - - local: '/src/opticalattackmanager/.gitlab-ci.yml' - - local: '/src/opticalcontroller/.gitlab-ci.yml' - - local: '/src/ztp/.gitlab-ci.yml' - - local: '/src/policy/.gitlab-ci.yml' - - local: '/src/automation/.gitlab-ci.yml' - - local: '/src/forecaster/.gitlab-ci.yml' - #- local: '/src/webui/.gitlab-ci.yml' - #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' - #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' - #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' - - local: '/src/slice/.gitlab-ci.yml' - #- local: '/src/interdomain/.gitlab-ci.yml' - - local: '/src/pathcomp/.gitlab-ci.yml' - #- local: '/src/dlt/.gitlab-ci.yml' - - local: '/src/load_generator/.gitlab-ci.yml' - - local: '/src/bgpls_speaker/.gitlab-ci.yml' - - local: '/src/kpi_manager/.gitlab-ci.yml' - - local: '/src/kpi_value_api/.gitlab-ci.yml' - #- local: '/src/kpi_value_writer/.gitlab-ci.yml' - #- local: '/src/telemetry/.gitlab-ci.yml' - - local: '/src/analytics/.gitlab-ci.yml' - - local: '/src/qos_profile/.gitlab-ci.yml' - - local: '/src/vnt_manager/.gitlab-ci.yml' - - local: '/src/e2e_orchestrator/.gitlab-ci.yml' - - local: '/src/ztp_server/.gitlab-ci.yml' - - local: '/src/osm_client/.gitlab-ci.yml' - - local: '/src/simap_connector/.gitlab-ci.yml' - - local: '/src/pluggables/.gitlab-ci.yml' +# #- local: '/manifests/.gitlab-ci.yml' +# - local: '/src/monitoring/.gitlab-ci.yml' +# - local: '/src/nbi/.gitlab-ci.yml' +# - local: '/src/context/.gitlab-ci.yml' +# - local: '/src/device/.gitlab-ci.yml' +# - local: '/src/service/.gitlab-ci.yml' +# - local: '/src/qkd_app/.gitlab-ci.yml' +# - local: '/src/dbscanserving/.gitlab-ci.yml' +# - local: '/src/opticalattackmitigator/.gitlab-ci.yml' +# - local: '/src/opticalattackdetector/.gitlab-ci.yml' +# - local: '/src/opticalattackmanager/.gitlab-ci.yml' +# - local: '/src/opticalcontroller/.gitlab-ci.yml' +# - local: '/src/ztp/.gitlab-ci.yml' +# - local: '/src/policy/.gitlab-ci.yml' +# - local: '/src/automation/.gitlab-ci.yml' +# - local: '/src/forecaster/.gitlab-ci.yml' +# #- local: '/src/webui/.gitlab-ci.yml' +# #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' +# #- local: 
'/src/l3_centralizedattackdetector/.gitlab-ci.yml' +# #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' +# - local: '/src/slice/.gitlab-ci.yml' +# #- local: '/src/interdomain/.gitlab-ci.yml' +# - local: '/src/pathcomp/.gitlab-ci.yml' +# #- local: '/src/dlt/.gitlab-ci.yml' +# - local: '/src/load_generator/.gitlab-ci.yml' +# - local: '/src/bgpls_speaker/.gitlab-ci.yml' +# - local: '/src/kpi_manager/.gitlab-ci.yml' +# - local: '/src/kpi_value_api/.gitlab-ci.yml' +# #- local: '/src/kpi_value_writer/.gitlab-ci.yml' +# #- local: '/src/telemetry/.gitlab-ci.yml' +# - local: '/src/analytics/.gitlab-ci.yml' +# - local: '/src/qos_profile/.gitlab-ci.yml' +# - local: '/src/vnt_manager/.gitlab-ci.yml' +# - local: '/src/e2e_orchestrator/.gitlab-ci.yml' +# - local: '/src/ztp_server/.gitlab-ci.yml' +# - local: '/src/osm_client/.gitlab-ci.yml' +# - local: '/src/simap_connector/.gitlab-ci.yml' +# - local: '/src/pluggables/.gitlab-ci.yml' # This should be last one: end-to-end integration tests - local: '/src/tests/.gitlab-ci.yml' diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index ceeb30767..36e92d77a 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -14,22 +14,22 @@ # include the individual .gitlab-ci.yml of each end-to-end integration test include: - - local: '/src/tests/ofc22/.gitlab-ci.yml' - #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' - - local: '/src/tests/ecoc22/.gitlab-ci.yml' - #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' - #- local: '/src/tests/ofc23/.gitlab-ci.yml' - - local: '/src/tests/ofc24/.gitlab-ci.yml' - - local: '/src/tests/eucnc24/.gitlab-ci.yml' - #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' - #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' - #- local: '/src/tests/ofc25/.gitlab-ci.yml' - #- local: '/src/tests/ryu-openflow/.gitlab-ci.yml' - - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' - - local: '/src/tests/acl_end2end/.gitlab-ci.yml' +# - local: '/src/tests/ofc22/.gitlab-ci.yml' +# #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' +# - local: '/src/tests/ecoc22/.gitlab-ci.yml' +# #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' +# #- local: '/src/tests/ofc23/.gitlab-ci.yml' +# - local: '/src/tests/ofc24/.gitlab-ci.yml' +# - local: '/src/tests/eucnc24/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25/.gitlab-ci.yml' +# #- local: '/src/tests/ryu-openflow/.gitlab-ci.yml' +# - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' +# - local: '/src/tests/acl_end2end/.gitlab-ci.yml' - local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml' - - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' - - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' - - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' - - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' +# - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' +# - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' +# - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' +# - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' -- GitLab From 88afb1ee33ab3e0a01aaecd3cced4f81194d376b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 14:18:08 +0000 Subject: [PATCH 28/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed Arista cEOS image version --- src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml index 44dfb9b1b..1b6762c6d 100644 --- a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml +++ b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml @@ -29,7 +29,8 @@ topology: #image: ceos:4.31.5M # tested, works #image: ceos:4.32.0F #image: ceos:4.33.5M - image: ceos:4.34.4M + #image: ceos:4.34.4M + image: ceos:4.32.2F #image: ceos:4.32.2.1F #image: ceos:4.33.1F # does not work, libyang.util.LibyangError: failed to parse data tree: No module named "openconfig-platform-healthz" in the context. linux: -- GitLab From 3666f390f444f55112423731b8c529238eea8615 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 14:29:27 +0000 Subject: [PATCH 29/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed imports in python code --- src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py b/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py index 70b689ede..fae8401bb 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py @@ -18,7 +18,7 @@ from common.Settings import get_service_host, get_service_port_http from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from service.client.ServiceClient import ServiceClient -from tests.tools.mock_osm.MockOSM import MockOSM +from .MockOSM import MockOSM from .OSM_Constants import WIM_MAPPING NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI) -- GitLab From b3bfef03df7fe5a4e260edca0e0c66de7080e0bb Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 14:43:02 +0000 Subject: [PATCH 30/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed ContainerLab scenario IP addresses --- src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml index 1b6762c6d..ae7eb78d4 100644 --- a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml +++ b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml @@ -59,7 +59,7 @@ topology: exec: - ip link set address 00:c1:ab:00:01:0a dev eth1 - ip link add link eth1 name eth1.125 type vlan id 125 - - ip addr add 172.16.2.10/24 dev eth1.125 + - ip addr add 172.16.1.10/24 dev eth1.125 - ip link set eth1.125 up dc2: @@ -68,7 +68,7 @@ topology: exec: - ip link set address 00:c1:ab:00:01:14 dev eth1 - ip link add link eth1 name eth1.125 type vlan id 125 - - ip addr add 172.16.2.20/24 dev eth1.125 + - ip addr add 172.16.1.20/24 dev eth1.125 - ip link set eth1.125 up links: -- GitLab From 803c0c0266da0e59dec8ed403620cc732010e7b5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 14:57:05 +0000 Subject: [PATCH 31/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed ping_check regular expression --- src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml index fd996680e..50779b484 100644 --- a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml +++ b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml @@ -211,8 +211,8 @@ end2end_test l2_vpn_gnmi_oc: # Run end-to-end test: test no connectivity with ping - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" 
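        # dc1 and dc2 must share the 172.16.1.0/24 subnet on VLAN 125: the L2 VPN only bridges
        # tagged frames between the two hosts, and the CI ping checks target 172.16.1.10/.20/.30,
        # which is why the 172.16.2.x addresses are replaced here.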
"172.16.1.20" "3 packets transmitted, 0 received, 100% packet loss" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received, 100% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" # Run end-to-end test: configure service TFS - > @@ -229,7 +229,7 @@ end2end_test l2_vpn_gnmi_oc: # Run end-to-end test: test connectivity with ping - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received, 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" # Run end-to-end test: deconfigure service TFS - > @@ -245,8 +245,8 @@ end2end_test l2_vpn_gnmi_oc: # Run end-to-end test: test no connectivity with ping - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received, 100% packet loss" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received, 100% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" # Run end-to-end test: configure service IETF - > @@ -263,7 +263,7 @@ end2end_test l2_vpn_gnmi_oc: # Run end-to-end test: test connectivity with ping - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received, 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" # Run end-to-end test: deconfigure service IETF - > @@ -279,8 +279,8 @@ end2end_test l2_vpn_gnmi_oc: # Run end-to-end test: test no connectivity with ping - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received, 100% packet loss" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received, 100% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss$" # Run end-to-end test: cleanup scenario - > -- GitLab From c6c2819fceb6cf7e6a0b6b79865d9eb7b9f7743d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 15:44:34 +0000 Subject: [PATCH 32/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed ping_check regular expression --- src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml index 50779b484..6e5d92082 100644 --- a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml +++ b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml @@ -211,8 +211,8 @@ end2end_test l2_vpn_gnmi_oc: # Run end-to-end test: test no connectivity with ping - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" # Run end-to-end test: configure service TFS - > @@ -229,7 +229,7 @@ end2end_test l2_vpn_gnmi_oc: # Run end-to-end test: test connectivity with ping - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" # Run end-to-end test: deconfigure service TFS - > @@ -245,8 +245,8 @@ end2end_test l2_vpn_gnmi_oc: # Run end-to-end test: test no connectivity with ping - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" # Run end-to-end test: configure service IETF - > @@ -263,7 +263,7 @@ end2end_test l2_vpn_gnmi_oc: # Run end-to-end test: test connectivity with ping - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" # Run end-to-end test: deconfigure service IETF - > @@ -279,8 +279,8 @@ end2end_test l2_vpn_gnmi_oc: # Run end-to-end test: test no connectivity with ping - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss$" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss$" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" # Run end-to-end test: cleanup scenario - > -- GitLab From 14837004c8dc9b5651ea7a27010f3e73ebe97551 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 16:58:44 +0000 Subject: [PATCH 33/79] End-to-end test - L2 VPN gNMI OpenConfig: - Increased log level to DEBUG for device and service --- src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml index 6e5d92082..4ec2dd0e9 100644 --- a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml +++ b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml @@ -145,9 +145,9 @@ end2end_test l2_vpn_gnmi_oc: # Configure TeraFlowSDN deployment # Uncomment if DEBUG log level is needed for the components #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml - #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml + - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml - #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml + - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml - source src/tests/${TEST_NAME}/deploy_specs.sh -- GitLab From cda338a227bc29e9c4123d5da1f3b6c9dcb5138e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 19 Jan 2026 17:26:52 +0000 Subject: [PATCH 34/79] End-to-end test - L2 VPN gNMI OpenConfig: - Added interface up on containerlab --- src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml index ae7eb78d4..765ee3ef3 100644 --- a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml +++ b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml @@ -58,6 +58,7 @@ topology: mgmt-ipv4: 172.20.20.201 exec: - ip link set address 00:c1:ab:00:01:0a dev eth1 + - ip link set eth1 up - ip link add link eth1 name eth1.125 type vlan id 125 - ip addr add 172.16.1.10/24 dev eth1.125 - ip link set eth1.125 up @@ -67,6 +68,7 @@ topology: mgmt-ipv4: 172.20.20.202 exec: - ip link set address 00:c1:ab:00:01:14 dev eth1 + - ip link set eth1 up - ip link add link eth1 name eth1.125 type vlan id 125 
- ip addr add 172.16.1.20/24 dev eth1.125 - ip link set eth1.125 up -- GitLab From ae61e83e2e6d4212a7e2d66c1f3a043a00ac6333 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 20 Jan 2026 14:56:15 +0000 Subject: [PATCH 35/79] End-to-end test - L2 VPN gNMI OpenConfig: - Added delay to check if routers simply take time to stabilize --- src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml index 4ec2dd0e9..cbf0f73d6 100644 --- a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml +++ b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml @@ -221,6 +221,9 @@ end2end_test l2_vpn_gnmi_oc: --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh + # Give time to routers for being configured and stabilized + - sleep 60 + # Dump configuration of the routers (after configure TFS service) - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" @@ -238,6 +241,9 @@ end2end_test l2_vpn_gnmi_oc: --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh + # Give time to routers for being configured and stabilized + - sleep 60 + # Dump configuration of the routers (after deconfigure TFS service) - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" @@ -255,6 +261,9 @@ end2end_test l2_vpn_gnmi_oc: --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-create.sh + # Give time to routers for being configured and stabilized + - sleep 60 + # Dump configuration of the routers (after configure IETF service) - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" @@ -272,6 +281,9 @@ end2end_test l2_vpn_gnmi_oc: --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-remove.sh + # Give time to routers for being configured and stabilized + - sleep 60 + # Dump configuration of the routers (after deconfigure IETF service) - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" -- GitLab From 093829031e43d62f5a113443a562786e8f57026f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 20 Jan 2026 16:49:23 +0000 Subject: [PATCH 36/79] NBI component - IETF L2VPN: - Added log messages to debug weird libyang.util.LibyangError: validation failed: Duplicate instance of "vpn-service" --- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index eb0f246e6..2bcb4c234 100644 --- 
a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -20,6 +20,7 @@ from flask_restful import Resource from werkzeug.exceptions import UnsupportedMediaType from common.proto.context_pb2 import ServiceTypeEnum from common.tools.context_queries.Service import get_services +from common.tools.grpc.Tools import grpc_message_list_to_json_string from context.client.ContextClient import ContextClient from nbi.service._tools.Authentication import HTTP_AUTH from nbi.service._tools.HttpStatusCodes import ( @@ -117,7 +118,14 @@ class L2VPN_SiteNetworkAccesses(Resource): for service in get_services(context_client): if service.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: continue + MSG = '[_prepare_request_payload] LOOP service={:s}' + LOGGER.warning(MSG.format(grpc_message_list_to_json_string(service))) + vpn_ids = [service.service_id.service_uuid.uuid, service.name] + + MSG = '[_prepare_request_payload] LOOP vpn_ids={:s}' + LOGGER.warning(MSG.format(str(vpn_ids))) + for vpn_id in vpn_ids: vpn_services.append({ 'vpn-id': vpn_id, @@ -128,6 +136,12 @@ class L2VPN_SiteNetworkAccesses(Resource): 'ce-vlan-cos-preservation': True, }) + MSG = '[_prepare_request_payload] LOOP vpn_services={:s}' + LOGGER.warning(MSG.format(str(vpn_services))) + + MSG = '[_prepare_request_payload] FINAL vpn_services={:s}' + LOGGER.warning(MSG.format(str(vpn_services))) + request_data = {'ietf-l2vpn-svc:l2vpn-svc': { 'vpn-services': { 'vpn-service': vpn_services @@ -145,6 +159,10 @@ class L2VPN_SiteNetworkAccesses(Resource): } }]} }} + + MSG = '[_prepare_request_payload] FINAL request_data={:s}' + LOGGER.warning(MSG.format(str(request_data))) + return request_data errors.append('Unexpected request: {:s}'.format(str(request_data))) -- GitLab From 6d4c428813db0c73175e286061855fb54797a3c7 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 20 Jan 2026 17:15:13 +0000 Subject: [PATCH 37/79] NBI component - IETF L2VPN: - Fixed log messaging --- src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 2bcb4c234..da5b5ba49 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -20,7 +20,7 @@ from flask_restful import Resource from werkzeug.exceptions import UnsupportedMediaType from common.proto.context_pb2 import ServiceTypeEnum from common.tools.context_queries.Service import get_services -from common.tools.grpc.Tools import grpc_message_list_to_json_string +from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from nbi.service._tools.Authentication import HTTP_AUTH from nbi.service._tools.HttpStatusCodes import ( @@ -119,7 +119,7 @@ class L2VPN_SiteNetworkAccesses(Resource): if service.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: continue MSG = '[_prepare_request_payload] LOOP service={:s}' - LOGGER.warning(MSG.format(grpc_message_list_to_json_string(service))) + LOGGER.warning(MSG.format(grpc_message_to_json_string(service))) vpn_ids = [service.service_id.service_uuid.uuid, service.name] -- GitLab From 10339b3adaf87a44361ff0ef440f509a16ed5889 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 20 Jan 2026 17:48:19 +0000 Subject: [PATCH 38/79] NBI component - IETF L2VPN: - Fixed de-duplication of L2 service names/uuids - Fixed 
log messaging --- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 22 +++++-------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index da5b5ba49..0a1fa51dc 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -118,14 +118,8 @@ class L2VPN_SiteNetworkAccesses(Resource): for service in get_services(context_client): if service.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: continue - MSG = '[_prepare_request_payload] LOOP service={:s}' - LOGGER.warning(MSG.format(grpc_message_to_json_string(service))) - - vpn_ids = [service.service_id.service_uuid.uuid, service.name] - - MSG = '[_prepare_request_payload] LOOP vpn_ids={:s}' - LOGGER.warning(MSG.format(str(vpn_ids))) - + # De-duplicate services uuid/names in case service_uuid == service_name + vpn_ids = {service.service_id.service_uuid.uuid, service.name} for vpn_id in vpn_ids: vpn_services.append({ 'vpn-id': vpn_id, @@ -136,11 +130,8 @@ class L2VPN_SiteNetworkAccesses(Resource): 'ce-vlan-cos-preservation': True, }) - MSG = '[_prepare_request_payload] LOOP vpn_services={:s}' - LOGGER.warning(MSG.format(str(vpn_services))) - - MSG = '[_prepare_request_payload] FINAL vpn_services={:s}' - LOGGER.warning(MSG.format(str(vpn_services))) + MSG = '[_prepare_request_payload] vpn_services={:s}' + LOGGER.debug(MSG.format(str(vpn_services))) request_data = {'ietf-l2vpn-svc:l2vpn-svc': { 'vpn-services': { @@ -160,9 +151,8 @@ class L2VPN_SiteNetworkAccesses(Resource): }]} }} - MSG = '[_prepare_request_payload] FINAL request_data={:s}' - LOGGER.warning(MSG.format(str(request_data))) - + MSG = '[_prepare_request_payload] request_data={:s}' + LOGGER.debug(MSG.format(str(request_data))) return request_data errors.append('Unexpected request: {:s}'.format(str(request_data))) -- GitLab From 1961cfc0e24b21d09ec6e9e6a037e0ace09eb515 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 20 Jan 2026 18:18:37 +0000 Subject: [PATCH 39/79] Service component: - Fixed logging in method TaskExecutor::get_devices_from_connection() --- src/service/service/task_scheduler/TaskExecutor.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index ff97fd931..e7230ba0e 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -333,7 +333,14 @@ class TaskExecutor: #controllers.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller devices.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller - LOGGER.debug('[get_devices_from_connection] devices = {:s}'.format(str(devices))) + plain_devices = { + device_type : { + device_uuid : grpc_message_to_json_string(device_grpc) + for device_uuid, device_grpc in device_dict.items() + } + for device_type, device_dict in devices.items() + } + LOGGER.debug('[get_devices_from_connection] devices = {:s}'.format(str(plain_devices))) #LOGGER.debug('[get_devices_from_connection] controllers = {:s}'.format(str(controllers))) #if len(devices) == 0 and len(controllers) > 0: # return controllers -- GitLab From d842fc917ead2285f430fb699ad1b554aea3df52 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 20 Jan 2026 18:19:06 +0000 Subject: [PATCH 40/79] End-to-end test - L2 VPN gNMI OpenConfig: - 
Enabled DEBUG log in NBI component --- src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml index cbf0f73d6..c3e175002 100644 --- a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml +++ b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml @@ -148,7 +148,7 @@ end2end_test l2_vpn_gnmi_oc: - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml - #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml + - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml - source src/tests/${TEST_NAME}/deploy_specs.sh #- export TFS_REGISTRY_IMAGES="${CI_REGISTRY_IMAGE}" -- GitLab From f590adf0211cb889a8a99b3b0c1c369468ff10a6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 20 Jan 2026 18:37:06 +0000 Subject: [PATCH 41/79] Test Tools - Mock OSM: - Fixed log messages --- src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py b/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py index aa4ca045f..de940a7d2 100644 --- a/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py +++ b/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py @@ -193,7 +193,7 @@ class WimconnectorIETFL2VPN(SdnConnectorBase): vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list} response_service_creation = None conn_info = [] - self.logger.info("Sending vpn-service :{}".format(vpn_service_l)) + self.logger.info("Sending vpn-service : {:s}".format(str(vpn_service_l))) try: endpoint_service_creation = ( @@ -319,6 +319,8 @@ class WimconnectorIETFL2VPN(SdnConnectorBase): conn_info_d["mapping"] = None conn_info.append(conn_info_d) + self.logger.info("Sending site_network_accesses : {:s}".format(str(site_network_accesses))) + try: endpoint_site_network_access_creation = ( "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/" -- GitLab From 8ac8e462e7275654ab83c7bd8bdc9b9cf3027700 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 20 Jan 2026 18:37:24 +0000 Subject: [PATCH 42/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed log messages in Mock OSM --- .../tests/WimconnectorIETFL2VPN.py | 48 +++++++++---------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py b/src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py index 6a616eb75..de940a7d2 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py @@ -69,14 +69,14 @@ class WimconnectorIETFL2VPN(SdnConnectorBase): self.wim["wim_url"] ) - #try: - # response = requests.get(endpoint, auth=self.auth) - # http_code = response.status_code - #except requests.exceptions.RequestException as e: - # raise 
SdnConnectorError(e.response, http_code=503) + try: + response = requests.get(endpoint, auth=self.auth) + http_code = response.status_code + except requests.exceptions.RequestException as e: + raise SdnConnectorError(e.response, http_code=503) - #if http_code != 200: - # raise SdnConnectorError("Failed while authenticating", http_code=http_code) + if http_code != 200: + raise SdnConnectorError("Failed while authenticating", http_code=http_code) self.logger.info("Credentials checked") @@ -208,21 +208,20 @@ class WimconnectorIETFL2VPN(SdnConnectorBase): auth=self.auth, ) except requests.exceptions.ConnectionError: - #raise SdnConnectorError( - # "Request to create service Timeout", http_code=408 - #) - pass - - #if response_service_creation.status_code == 409: - # raise SdnConnectorError( - # "Service already exists", - # http_code=response_service_creation.status_code, - # ) - #elif response_service_creation.status_code != requests.codes.created: - # raise SdnConnectorError( - # "Request to create service not accepted", - # http_code=response_service_creation.status_code, - # ) + raise SdnConnectorError( + "Request to create service Timeout", http_code=408 + ) + + if response_service_creation.status_code == 409: + raise SdnConnectorError( + "Service already exists", + http_code=response_service_creation.status_code, + ) + elif response_service_creation.status_code != requests.codes.created: + raise SdnConnectorError( + "Request to create service not accepted", + http_code=response_service_creation.status_code, + ) self.logger.info('connection_points = {:s}'.format(str(connection_points))) @@ -378,10 +377,9 @@ class WimconnectorIETFL2VPN(SdnConnectorBase): http_code=response_endpoint_site_network_access_creation.status_code, ) except requests.exceptions.ConnectionError: - #self.delete_connectivity_service(vpn_service["vpn-id"]) + self.delete_connectivity_service(vpn_service["vpn-id"]) - #raise SdnConnectorError("Request Timeout", http_code=408) - pass + raise SdnConnectorError("Request Timeout", http_code=408) return uuid_l2vpn, conn_info -- GitLab From 8fa54ce38d3b6a609ee612163a831b4f6ecb5d67 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 11:10:37 +0000 Subject: [PATCH 43/79] NBI component: - Enabled DEBUG mode for NBI component --- manifests/nbiservice.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index 27026cc0f..1556eb28b 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -39,9 +39,9 @@ spec: #- containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" - name: FLASK_ENV - value: "production" # normal value is "production", change to "development" if developing + value: "development" # normal value is "production", change to "development" if developing - name: IETF_NETWORK_RENDERER value: "LIBYANG" envFrom: -- GitLab From 4912ae0eacfe51f10084642fad870698a0bcedd9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 11:11:30 +0000 Subject: [PATCH 44/79] NBI component - IETF L2VPN connector: - Minor code cleanup --- src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 0a1fa51dc..ec5addc65 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -20,7 +20,6 @@ from flask_restful import Resource 
from werkzeug.exceptions import UnsupportedMediaType from common.proto.context_pb2 import ServiceTypeEnum from common.tools.context_queries.Service import get_services -from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from nbi.service._tools.Authentication import HTTP_AUTH from nbi.service._tools.HttpStatusCodes import ( -- GitLab From dea02d562a160fa5c6c0d30a7c7c92e8d2ffda2a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 11:25:46 +0000 Subject: [PATCH 45/79] NBI component - IETF L2VPN connector: - Forced LOG LEVEL to DEBUG --- src/nbi/service/NbiApplication.py | 1 + src/nbi/service/app.py | 3 ++- src/nbi/service/ietf_l2vpn/Handlers.py | 1 + src/nbi/service/ietf_l2vpn/L2VPN_Service.py | 1 + src/nbi/service/ietf_l2vpn/L2VPN_Services.py | 1 + src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 1 + 6 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/nbi/service/NbiApplication.py b/src/nbi/service/NbiApplication.py index ad02c754c..7a5a5d67c 100644 --- a/src/nbi/service/NbiApplication.py +++ b/src/nbi/service/NbiApplication.py @@ -23,6 +23,7 @@ from nbi.Config import SECRET_KEY LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) def log_request(response): timestamp = time.strftime('[%Y-%b-%d %H:%M]') diff --git a/src/nbi/service/app.py b/src/nbi/service/app.py index 7cb7cb3e7..b41e22b6a 100644 --- a/src/nbi/service/app.py +++ b/src/nbi/service/app.py @@ -49,7 +49,8 @@ from .tfs_api import register_tfs_api from .vntm_recommend import register_vntm_recommend from .well_known_meta import register_well_known -LOG_LEVEL = get_log_level() +#LOG_LEVEL = get_log_level() +LOG_LEVEL = logging.DEBUG logging.basicConfig( level=LOG_LEVEL, format="[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s", diff --git a/src/nbi/service/ietf_l2vpn/Handlers.py b/src/nbi/service/ietf_l2vpn/Handlers.py index 2edcffdd0..e864ddebb 100644 --- a/src/nbi/service/ietf_l2vpn/Handlers.py +++ b/src/nbi/service/ietf_l2vpn/Handlers.py @@ -33,6 +33,7 @@ from .Constants import ( ) LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) def create_service( service_uuid : str, context_uuid : Optional[str] = DEFAULT_CONTEXT_NAME diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_Service.py b/src/nbi/service/ietf_l2vpn/L2VPN_Service.py index 070a548b5..c7b6137cc 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_Service.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_Service.py @@ -30,6 +30,7 @@ from .Handlers import update_vpn from .YangValidator import YangValidator LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) class L2VPN_Service(Resource): @HTTP_AUTH.login_required diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_Services.py b/src/nbi/service/ietf_l2vpn/L2VPN_Services.py index ccdad5c54..00c3d026a 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_Services.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_Services.py @@ -24,6 +24,7 @@ from .Handlers import process_site, process_vpn_service from .YangValidator import YangValidator LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) class L2VPN_Services(Resource): @HTTP_AUTH.login_required diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index ec5addc65..0c4cf38aa 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -29,6 +29,7 @@ from .Handlers import 
process_site_network_access from .YangValidator import YangValidator LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) class L2VPN_SiteNetworkAccesses(Resource): @HTTP_AUTH.login_required -- GitLab From 2fde17129074153ee583529d49274214f0ee14e2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 12:04:29 +0000 Subject: [PATCH 46/79] NBI component - IETF L2VPN connector: - Fixed parsing of site network accesses --- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 0c4cf38aa..a034e482a 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -167,11 +167,17 @@ class L2VPN_SiteNetworkAccesses(Resource): request_data = yang_validator.parse_to_dict(request_data) yang_validator.destroy() - site_network_accesses = ( - request_data.get('site-network-accesses', dict()) - .get('site-network-access', list()) + sites = ( + request_data.get('ietf-l2vpn-svc:l2vpn-svc', dict()) + .get('sites', dict()) + .get('site', list()) ) - for site_network_access in site_network_accesses: - process_site_network_access(site_id, site_network_access, errors) + for site in sites: + site_network_accesses = ( + site.get('site-network-accesses', dict()) + .get('site-network-access', list()) + ) + for site_network_access in site_network_accesses: + process_site_network_access(site_id, site_network_access, errors) return errors -- GitLab From 256763acd1864dd948a7fcf750e9dfa3db26c465 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 13:05:57 +0000 Subject: [PATCH 47/79] NBI component - IETF L2VPN connector: - Added logging of errors --- src/nbi/service/ietf_l2vpn/L2VPN_Service.py | 5 +++++ src/nbi/service/ietf_l2vpn/L2VPN_Services.py | 2 ++ src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 8 ++++++++ 3 files changed, 15 insertions(+) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_Service.py b/src/nbi/service/ietf_l2vpn/L2VPN_Service.py index c7b6137cc..9d53fe259 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_Service.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_Service.py @@ -107,6 +107,11 @@ class L2VPN_Service(Resource): else: errors.append('Unexpected request format: {:s}'.format(str(request_data))) + if len(errors) > 0: + LOGGER.error('Errors: {:s}'.format(str(errors))) + else: + LOGGER.debug('Errors: {:s}'.format(str(errors))) + response = jsonify(errors) response.status_code = HTTP_NOCONTENT if len(errors) == 0 else HTTP_SERVERERROR return response diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_Services.py b/src/nbi/service/ietf_l2vpn/L2VPN_Services.py index 00c3d026a..daf7eb901 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_Services.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_Services.py @@ -76,6 +76,8 @@ class L2VPN_Services(Resource): if len(errors) > 0: LOGGER.error('Errors: {:s}'.format(str(errors))) + else: + LOGGER.debug('Errors: {:s}'.format(str(errors))) response = jsonify(errors) response.status_code = HTTP_CREATED if len(errors) == 0 else HTTP_SERVERERROR diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index a034e482a..51e141151 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -39,6 +39,10 @@ class L2VPN_SiteNetworkAccesses(Resource): 
LOGGER.debug('Site_Id: {:s}'.format(str(site_id))) LOGGER.debug('Request: {:s}'.format(str(request_data))) errors = self._process_site_network_accesses(site_id, request_data) + if len(errors) > 0: + LOGGER.error('Errors: {:s}'.format(str(errors))) + else: + LOGGER.debug('Errors: {:s}'.format(str(errors))) response = jsonify(errors) response.status_code = HTTP_CREATED if len(errors) == 0 else HTTP_SERVERERROR return response @@ -50,6 +54,10 @@ class L2VPN_SiteNetworkAccesses(Resource): LOGGER.debug('Site_Id: {:s}'.format(str(site_id))) LOGGER.debug('Request: {:s}'.format(str(request_data))) errors = self._process_site_network_accesses(site_id, request_data) + if len(errors) > 0: + LOGGER.error('Errors: {:s}'.format(str(errors))) + else: + LOGGER.debug('Errors: {:s}'.format(str(errors))) response = jsonify(errors) response.status_code = HTTP_NOCONTENT if len(errors) == 0 else HTTP_SERVERERROR return response -- GitLab From 65ca396cd8884da59ba804cf5ce6e1b11fb158c8 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 14:15:35 +0000 Subject: [PATCH 48/79] NBI component - IETF L2VPN connector: - Added logging for site network accesses --- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 51e141151..f3b4cc735 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -167,25 +167,46 @@ class L2VPN_SiteNetworkAccesses(Resource): return None def _process_site_network_accesses(self, site_id : str, request_data : Dict) -> List[Dict]: + LOGGER.warning('[_process_site_network_accesses] A') errors = list() request_data = self._prepare_request_payload(site_id, request_data, errors) + LOGGER.warning('[_process_site_network_accesses] B') + MSG = '[_process_site_network_accesses] errors={:s}' + LOGGER.debug(MSG.format(str(errors))) if len(errors) > 0: return errors + LOGGER.warning('[_process_site_network_accesses] C') yang_validator = YangValidator('ietf-l2vpn-svc') + LOGGER.warning('[_process_site_network_accesses] D') request_data = yang_validator.parse_to_dict(request_data) + LOGGER.warning('[_process_site_network_accesses] E') yang_validator.destroy() + LOGGER.warning('[_process_site_network_accesses] F') + MSG = '[_process_site_network_accesses] request_data={:s}' + LOGGER.debug(MSG.format(str(request_data))) sites = ( request_data.get('ietf-l2vpn-svc:l2vpn-svc', dict()) .get('sites', dict()) .get('site', list()) ) + MSG = '[_process_site_network_accesses] sites={:s}' + LOGGER.debug(MSG.format(str(sites))) for site in sites: + MSG = '[_process_site_network_accesses] site={:s}' + LOGGER.debug(MSG.format(str(site))) site_network_accesses = ( site.get('site-network-accesses', dict()) .get('site-network-access', list()) ) + MSG = '[_process_site_network_accesses] site_network_accesses={:s}' + LOGGER.debug(MSG.format(str(site_network_accesses))) for site_network_access in site_network_accesses: + MSG = '[_process_site_network_accesses] site_network_access={:s}' + LOGGER.debug(MSG.format(str(site_network_access))) process_site_network_access(site_id, site_network_access, errors) + LOGGER.warning('[_process_site_network_accesses] G') + MSG = '[_process_site_network_accesses] errors={:s}' + LOGGER.debug(MSG.format(str(errors))) return errors -- GitLab From 131bb963a5edfeacd51d6f16e408b2c64e2dd73f Mon Sep 17 00:00:00 2001 From: gifrerenom 
Date: Wed, 21 Jan 2026 15:01:07 +0000 Subject: [PATCH 49/79] NBI component - IETF L2VPN connector: - Fixed parsing of L2 Site Network Accesses --- src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index f3b4cc735..384469f44 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -186,7 +186,7 @@ class L2VPN_SiteNetworkAccesses(Resource): MSG = '[_process_site_network_accesses] request_data={:s}' LOGGER.debug(MSG.format(str(request_data))) sites = ( - request_data.get('ietf-l2vpn-svc:l2vpn-svc', dict()) + request_data.get('l2vpn-svc', dict()) .get('sites', dict()) .get('site', list()) ) -- GitLab From 2ff7f1b53831f993acf273335efb07c7125682cc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 16:54:50 +0000 Subject: [PATCH 50/79] NBI component - IETF L2VPN connector: - Fixed parsing of L2 Site Network Accesses --- src/nbi/service/ietf_l2vpn/Handlers.py | 111 ++++++++++++++++--------- 1 file changed, 72 insertions(+), 39 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/Handlers.py b/src/nbi/service/ietf_l2vpn/Handlers.py index e864ddebb..43779de4c 100644 --- a/src/nbi/service/ietf_l2vpn/Handlers.py +++ b/src/nbi/service/ietf_l2vpn/Handlers.py @@ -65,37 +65,87 @@ def process_site_network_access( site_id : str, network_access : Dict, errors : List[Dict] ) -> None: try: - site_network_access_type = network_access['site-network-access-type'] - site_network_access_type = site_network_access_type.replace('ietf-l2vpn-svc:', '') - if site_network_access_type != 'multipoint': - MSG = 'Site Network Access Type: {:s}' - msg = MSG.format(str(network_access['site-network-access-type'])) - raise NotImplementedError(msg) + #device_uuid = None + #endpoint_uuid = None + #if 'device-reference' in network_access: + # device_uuid = network_access['device-reference'] + # endpoint_uuid = network_access['network-access-id'] + + bearer_reference = None + if 'bearer' in network_access: + network_access_bearer = network_access['bearer'] + if 'bearer-reference' in network_access_bearer: + bearer_reference = network_access_bearer['bearer-reference'] + + bearer_mapping = BEARER_MAPPINGS.get(bearer_reference) + if bearer_mapping is None: + if ':' in bearer_reference: + bearer_mapping = str(bearer_reference).split(':', maxsplit=1) + bearer_mapping.extend([None, None, None, None, None, None, None]) + bearer_mapping = tuple(bearer_mapping) + MSG = 'Bearer({:s}) not found; auto-generated mapping: {:s}' + LOGGER.warning(MSG.format(str(bearer_reference), str(bearer_mapping))) + else: + MSG = 'Bearer({:s}) not found; unable to auto-generated mapping' + raise Exception(MSG.format(str(bearer_reference))) - access_role : str = network_access['vpn-attachment']['site-role'] - access_role = access_role.replace('ietf-l2vpn-svc:', '').replace('-role', '') # hub/spoke - if access_role not in {'hub', 'spoke'}: - MSG = 'Site VPN Attackment Role: {:s}' - raise NotImplementedError(MSG.format(str(network_access['site-network-access-type']))) + ( + device_uuid, endpoint_uuid, router_id, route_dist, sub_if_index, + address_ip, address_prefix, remote_router, circuit_id + ) = bearer_mapping - device_uuid = network_access['device-reference'] - endpoint_uuid = network_access['site-network-access-id'] service_uuid = network_access['vpn-attachment']['vpn-id'] - 
encapsulation_type = network_access['connection']['encapsulation-type'] - cvlan_tag_id = network_access['connection']['tagged-interface'][encapsulation_type]['cvlan-id'] - - bearer_reference = network_access['bearer']['bearer-reference'] - - service_mtu = network_access['service']['svc-mtu'] - service_input_bandwidth = network_access['service']['svc-input-bandwidth'] - service_output_bandwidth = network_access['service']['svc-output-bandwidth'] + network_access_connection = network_access['connection'] + encapsulation_type = network_access_connection['encapsulation-type'] + encapsulation_type = encapsulation_type.replace('ietf-l2vpn-svc:', '') + if encapsulation_type != 'vlan': + encapsulation_type = network_access_connection['encapsulation-type'] + MSG = 'EncapsulationType({:s}) not supported' + raise NotImplementedError(MSG.format(str(encapsulation_type))) + + cvlan_tag_id = None + if 'tagged-interface' in network_access_connection: + nac_tagged_if = network_access_connection['tagged-interface'] + nac_tagged_if_type = nac_tagged_if.get('type', 'priority-tagged') + nac_tagged_if_type = nac_tagged_if_type.replace('ietf-l2vpn-svc:', '') + if nac_tagged_if_type == 'dot1q': + encapsulation_data = nac_tagged_if['dot1q-vlan-tagged'] + tag_type = encapsulation_data.get('tg-type', 'c-vlan') + tag_type = tag_type.replace('ietf-l2vpn-svc:', '') + if tag_type == 'c-vlan': + cvlan_tag_id = encapsulation_data['cvlan-id'] + else: + tag_type = encapsulation_data.get('tg-type', 'c-vlan') + MSG = 'TagType({:s}) not supported' + raise NotImplementedError(MSG.format(str(tag_type))) + else: + nac_tagged_if_type = nac_tagged_if.get('type', 'priority-tagged') + MSG = 'TaggedInterfaceType({:s}) not supported' + raise NotImplementedError(MSG.format(str(nac_tagged_if_type))) + + network_access_service = network_access.get('service', dict()) + + service_mtu = network_access_service.get('svc-mtu', DEFAULT_MTU) + + service_input_bandwidth = network_access_service.get('svc-input-bandwidth') + service_bandwidth_bps + + service_output_bandwidth = network_access_service.get('svc-output-bandwidth') service_bandwidth_bps = max(service_input_bandwidth, service_output_bandwidth) service_bandwidth_gbps = service_bandwidth_bps / 1.e9 max_e2e_latency_ms = None availability = None - for qos_profile_class in network_access['service']['qos']['qos-profile']['classes']['class']: + + qos_profile_classes = ( + network_access.get('service', dict()) + .get('qos', dict()) + .get('qos-profile', dict()) + .get('classes', dict()) + .get('class', list()) + ) + for qos_profile_class in qos_profile_classes: if qos_profile_class['class-id'] != 'qos-realtime': MSG = 'Site Network Access QoS Class Id: {:s}' raise NotImplementedError(MSG.format(str(qos_profile_class['class-id']))) @@ -126,22 +176,6 @@ def process_site_network_access( single_active : bool = len(network_access_availability.get('single-active', [])) > 0 all_active : bool = len(network_access_availability.get('all-active', [])) > 0 - mapping = BEARER_MAPPINGS.get(bearer_reference) - if mapping is None: - if ':' not in bearer_reference: - MSG = 'Bearer({:s}) not found; unable to auto-generated mapping' - raise Exception(MSG.format(str(bearer_reference))) - mapping = str(bearer_reference).split(':', maxsplit=1) - mapping.extend([None, None, None, None, None, None, None]) - mapping = tuple(mapping) - MSG = 'Bearer({:s}) not found; auto-generated mapping: {:s}' - LOGGER.warning(MSG.format(str(bearer_reference), str(mapping))) - - ( - device_uuid, endpoint_uuid, router_id, route_dist, 
sub_if_index, - address_ip, address_prefix, remote_router, circuit_id - ) = mapping - context_client = ContextClient() service = get_service_by_uuid( context_client, service_uuid, context_uuid=DEFAULT_CONTEXT_NAME, rw_copy=True @@ -179,7 +213,6 @@ def process_site_network_access( update_constraint_sla_availability(constraints, num_disjoint_paths, all_active, 0.0) service_settings_key = '/settings' - if service_mtu is None: service_mtu = DEFAULT_MTU field_updates = { 'mtu' : (service_mtu, True), #'address_families': (DEFAULT_ADDRESS_FAMILIES, True), -- GitLab From bf46f35a60541a05d6510cad616af0a708308740 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 17:19:11 +0000 Subject: [PATCH 51/79] NBI component - IETF L2VPN connector: - Fixed parsing of L2 Site Network Accesses --- src/nbi/service/ietf_l2vpn/Handlers.py | 37 +++++++++++++++++--------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/Handlers.py b/src/nbi/service/ietf_l2vpn/Handlers.py index 43779de4c..b44f35102 100644 --- a/src/nbi/service/ietf_l2vpn/Handlers.py +++ b/src/nbi/service/ietf_l2vpn/Handlers.py @@ -128,15 +128,26 @@ def process_site_network_access( service_mtu = network_access_service.get('svc-mtu', DEFAULT_MTU) - service_input_bandwidth = network_access_service.get('svc-input-bandwidth') - service_bandwidth_bps + max_bandwidth_gbps = None + max_e2e_latency_ms = None + availability = None + + service_bandwidth_bps = 0 + service_input_bandwidth = network_access_service.get('svc-input-bandwidth') + if service_input_bandwidth is not None: + service_input_bandwidth = float(service_input_bandwidth) + service_bandwidth_bps = max(service_bandwidth_bps, service_input_bandwidth) service_output_bandwidth = network_access_service.get('svc-output-bandwidth') - service_bandwidth_bps = max(service_input_bandwidth, service_output_bandwidth) - service_bandwidth_gbps = service_bandwidth_bps / 1.e9 + if service_output_bandwidth is not None: + service_output_bandwidth = float(service_output_bandwidth) + if service_bandwidth_bps is None: + service_bandwidth_bps = service_output_bandwidth + else: + service_bandwidth_bps = max(service_bandwidth_bps, service_output_bandwidth) - max_e2e_latency_ms = None - availability = None + if service_bandwidth_bps > 1.e-12: + max_bandwidth_gbps = service_bandwidth_bps / 1.e9 qos_profile_classes = ( network_access.get('service', dict()) @@ -156,8 +167,8 @@ def process_site_network_access( MSG = 'Site Network Access QoS Class Direction: {:s}' raise NotImplementedError(MSG.format(str(qos_profile_class['direction']))) - max_e2e_latency_ms = qos_profile_class['latency']['latency-boundary'] - availability = qos_profile_class['bandwidth']['guaranteed-bw-percent'] + max_e2e_latency_ms = float(qos_profile_class['latency']['latency-boundary']) + availability = float(qos_profile_class['bandwidth']['guaranteed-bw-percent']) network_access_diversity = network_access.get('access-diversity', {}) diversity_constraints = network_access_diversity.get('constraints', {}).get('constraint', []) @@ -192,8 +203,8 @@ def process_site_network_access( update_constraint_endpoint_location(constraints, endpoint_id, region=site_id) if access_priority is not None: update_constraint_endpoint_priority(constraints, endpoint_id, access_priority) - if service_bandwidth_gbps is not None: - update_constraint_sla_capacity(constraints, service_bandwidth_gbps) + if max_bandwidth_gbps is not None: + update_constraint_sla_capacity(constraints, max_bandwidth_gbps) if max_e2e_latency_ms is not 
None: update_constraint_sla_latency(constraints, max_e2e_latency_ms) if availability is not None: @@ -280,7 +291,7 @@ def update_site_network_access(network_access : Dict, errors : List[Dict]) -> No service_input_bandwidth = network_access['service']['svc-input-bandwidth'] service_output_bandwidth = network_access['service']['svc-output-bandwidth'] service_bandwidth_bps = max(service_input_bandwidth, service_output_bandwidth) - service_bandwidth_gbps = service_bandwidth_bps / 1.e9 + max_bandwidth_gbps = service_bandwidth_bps / 1.e9 max_e2e_latency_ms = None availability = None @@ -294,8 +305,8 @@ def update_site_network_access(network_access : Dict, errors : List[Dict]) -> No raise Exception(MSG.format(str(service_uuid))) constraints = service.service_constraints - if service_bandwidth_gbps is not None: - update_constraint_sla_capacity(constraints, service_bandwidth_gbps) + if max_bandwidth_gbps is not None: + update_constraint_sla_capacity(constraints, max_bandwidth_gbps) if max_e2e_latency_ms is not None: update_constraint_sla_latency(constraints, max_e2e_latency_ms) if availability is not None: -- GitLab From 17c0388e4dcf1480eea5020654e65a5be4cd812d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 18:11:32 +0000 Subject: [PATCH 52/79] NBI component - IETF L2VPN connector: - Removed unneeded log messages --- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 21 ------------------- 1 file changed, 21 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 384469f44..6161939d9 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -167,46 +167,25 @@ class L2VPN_SiteNetworkAccesses(Resource): return None def _process_site_network_accesses(self, site_id : str, request_data : Dict) -> List[Dict]: - LOGGER.warning('[_process_site_network_accesses] A') errors = list() request_data = self._prepare_request_payload(site_id, request_data, errors) - LOGGER.warning('[_process_site_network_accesses] B') - MSG = '[_process_site_network_accesses] errors={:s}' - LOGGER.debug(MSG.format(str(errors))) if len(errors) > 0: return errors - LOGGER.warning('[_process_site_network_accesses] C') yang_validator = YangValidator('ietf-l2vpn-svc') - LOGGER.warning('[_process_site_network_accesses] D') request_data = yang_validator.parse_to_dict(request_data) - LOGGER.warning('[_process_site_network_accesses] E') yang_validator.destroy() - LOGGER.warning('[_process_site_network_accesses] F') - MSG = '[_process_site_network_accesses] request_data={:s}' - LOGGER.debug(MSG.format(str(request_data))) sites = ( request_data.get('l2vpn-svc', dict()) .get('sites', dict()) .get('site', list()) ) - MSG = '[_process_site_network_accesses] sites={:s}' - LOGGER.debug(MSG.format(str(sites))) for site in sites: - MSG = '[_process_site_network_accesses] site={:s}' - LOGGER.debug(MSG.format(str(site))) site_network_accesses = ( site.get('site-network-accesses', dict()) .get('site-network-access', list()) ) - MSG = '[_process_site_network_accesses] site_network_accesses={:s}' - LOGGER.debug(MSG.format(str(site_network_accesses))) for site_network_access in site_network_accesses: - MSG = '[_process_site_network_accesses] site_network_access={:s}' - LOGGER.debug(MSG.format(str(site_network_access))) process_site_network_access(site_id, site_network_access, errors) - LOGGER.warning('[_process_site_network_accesses] G') - MSG = '[_process_site_network_accesses] 
errors={:s}' - LOGGER.debug(MSG.format(str(errors))) return errors -- GitLab From 9f87c311f18c4c85322f2fc09e5ccbd691bbce43 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 18:11:56 +0000 Subject: [PATCH 53/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed ietf-remove test --- src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py index 993a880e9..32d78b39d 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py @@ -75,6 +75,7 @@ def test_service_ietf_removal( assert len(service_uuids) == 1 service_uuid = service_uuids.pop() + osm_wim.conn_info[service_uuid] = dict() # delete just needs the placeholder to be populated osm_wim.delete_connectivity_service(service_uuid) # Verify the scenario has no services/slices -- GitLab From 52e2f74fd8c62302d148a2e1fc111084c1bb9dbe Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 18:55:44 +0000 Subject: [PATCH 54/79] NBI component: - Recovered Manifest config --- manifests/nbiservice.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index 1556eb28b..27026cc0f 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -39,9 +39,9 @@ spec: #- containerPort: 9192 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" - name: FLASK_ENV - value: "development" # normal value is "production", change to "development" if developing + value: "production" # normal value is "production", change to "development" if developing - name: IETF_NETWORK_RENDERER value: "LIBYANG" envFrom: -- GitLab From 3cd7c82b3ff93c75d6b88d67ac517d43e3eee9b2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 18:56:08 +0000 Subject: [PATCH 55/79] NBI component - IETF L2VPN connector: - Recovered log levels --- src/nbi/service/NbiApplication.py | 1 - src/nbi/service/app.py | 3 +-- src/nbi/service/ietf_l2vpn/Handlers.py | 1 - src/nbi/service/ietf_l2vpn/L2VPN_Service.py | 1 - src/nbi/service/ietf_l2vpn/L2VPN_Services.py | 1 - src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 1 - 6 files changed, 1 insertion(+), 7 deletions(-) diff --git a/src/nbi/service/NbiApplication.py b/src/nbi/service/NbiApplication.py index 7a5a5d67c..ad02c754c 100644 --- a/src/nbi/service/NbiApplication.py +++ b/src/nbi/service/NbiApplication.py @@ -23,7 +23,6 @@ from nbi.Config import SECRET_KEY LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) def log_request(response): timestamp = time.strftime('[%Y-%b-%d %H:%M]') diff --git a/src/nbi/service/app.py b/src/nbi/service/app.py index b41e22b6a..7cb7cb3e7 100644 --- a/src/nbi/service/app.py +++ b/src/nbi/service/app.py @@ -49,8 +49,7 @@ from .tfs_api import register_tfs_api from .vntm_recommend import register_vntm_recommend from .well_known_meta import register_well_known -#LOG_LEVEL = get_log_level() -LOG_LEVEL = logging.DEBUG +LOG_LEVEL = get_log_level() logging.basicConfig( level=LOG_LEVEL, format="[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s", diff --git a/src/nbi/service/ietf_l2vpn/Handlers.py b/src/nbi/service/ietf_l2vpn/Handlers.py index b44f35102..81e5be981 100644 --- a/src/nbi/service/ietf_l2vpn/Handlers.py +++ b/src/nbi/service/ietf_l2vpn/Handlers.py @@ -33,7 +33,6 @@ from .Constants import ( ) LOGGER = 
logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) def create_service( service_uuid : str, context_uuid : Optional[str] = DEFAULT_CONTEXT_NAME diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_Service.py b/src/nbi/service/ietf_l2vpn/L2VPN_Service.py index 9d53fe259..13d46418c 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_Service.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_Service.py @@ -30,7 +30,6 @@ from .Handlers import update_vpn from .YangValidator import YangValidator LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) class L2VPN_Service(Resource): @HTTP_AUTH.login_required diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_Services.py b/src/nbi/service/ietf_l2vpn/L2VPN_Services.py index daf7eb901..a43a78725 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_Services.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_Services.py @@ -24,7 +24,6 @@ from .Handlers import process_site, process_vpn_service from .YangValidator import YangValidator LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) class L2VPN_Services(Resource): @HTTP_AUTH.login_required diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 6161939d9..782fddf44 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -29,7 +29,6 @@ from .Handlers import process_site_network_access from .YangValidator import YangValidator LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) class L2VPN_SiteNetworkAccesses(Resource): @HTTP_AUTH.login_required -- GitLab From 986802dedda76b54da69462491c78efbac5e8de6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 21 Jan 2026 18:56:32 +0000 Subject: [PATCH 56/79] CI/CD pipeline: - Reactivated all tests --- .gitlab-ci.yml | 76 ++++++++++++++++++++-------------------- src/tests/.gitlab-ci.yml | 34 +++++++++--------- 2 files changed, 55 insertions(+), 55 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6627e11cb..53763f5e1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -28,44 +28,44 @@ workflow: # include the individual .gitlab-ci.yml of each micro-service and tests include: -# #- local: '/manifests/.gitlab-ci.yml' -# - local: '/src/monitoring/.gitlab-ci.yml' -# - local: '/src/nbi/.gitlab-ci.yml' -# - local: '/src/context/.gitlab-ci.yml' -# - local: '/src/device/.gitlab-ci.yml' -# - local: '/src/service/.gitlab-ci.yml' -# - local: '/src/qkd_app/.gitlab-ci.yml' -# - local: '/src/dbscanserving/.gitlab-ci.yml' -# - local: '/src/opticalattackmitigator/.gitlab-ci.yml' -# - local: '/src/opticalattackdetector/.gitlab-ci.yml' -# - local: '/src/opticalattackmanager/.gitlab-ci.yml' -# - local: '/src/opticalcontroller/.gitlab-ci.yml' -# - local: '/src/ztp/.gitlab-ci.yml' -# - local: '/src/policy/.gitlab-ci.yml' -# - local: '/src/automation/.gitlab-ci.yml' -# - local: '/src/forecaster/.gitlab-ci.yml' -# #- local: '/src/webui/.gitlab-ci.yml' -# #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' -# #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' -# #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' -# - local: '/src/slice/.gitlab-ci.yml' -# #- local: '/src/interdomain/.gitlab-ci.yml' -# - local: '/src/pathcomp/.gitlab-ci.yml' -# #- local: '/src/dlt/.gitlab-ci.yml' -# - local: '/src/load_generator/.gitlab-ci.yml' -# - local: '/src/bgpls_speaker/.gitlab-ci.yml' -# - local: '/src/kpi_manager/.gitlab-ci.yml' -# - local: '/src/kpi_value_api/.gitlab-ci.yml' -# #- local: 
'/src/kpi_value_writer/.gitlab-ci.yml' -# #- local: '/src/telemetry/.gitlab-ci.yml' -# - local: '/src/analytics/.gitlab-ci.yml' -# - local: '/src/qos_profile/.gitlab-ci.yml' -# - local: '/src/vnt_manager/.gitlab-ci.yml' -# - local: '/src/e2e_orchestrator/.gitlab-ci.yml' -# - local: '/src/ztp_server/.gitlab-ci.yml' -# - local: '/src/osm_client/.gitlab-ci.yml' -# - local: '/src/simap_connector/.gitlab-ci.yml' -# - local: '/src/pluggables/.gitlab-ci.yml' + #- local: '/manifests/.gitlab-ci.yml' + - local: '/src/monitoring/.gitlab-ci.yml' + - local: '/src/nbi/.gitlab-ci.yml' + - local: '/src/context/.gitlab-ci.yml' + - local: '/src/device/.gitlab-ci.yml' + - local: '/src/service/.gitlab-ci.yml' + - local: '/src/qkd_app/.gitlab-ci.yml' + - local: '/src/dbscanserving/.gitlab-ci.yml' + - local: '/src/opticalattackmitigator/.gitlab-ci.yml' + - local: '/src/opticalattackdetector/.gitlab-ci.yml' + - local: '/src/opticalattackmanager/.gitlab-ci.yml' + - local: '/src/opticalcontroller/.gitlab-ci.yml' + - local: '/src/ztp/.gitlab-ci.yml' + - local: '/src/policy/.gitlab-ci.yml' + - local: '/src/automation/.gitlab-ci.yml' + - local: '/src/forecaster/.gitlab-ci.yml' + #- local: '/src/webui/.gitlab-ci.yml' + #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' + #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' + #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' + - local: '/src/slice/.gitlab-ci.yml' + #- local: '/src/interdomain/.gitlab-ci.yml' + - local: '/src/pathcomp/.gitlab-ci.yml' + #- local: '/src/dlt/.gitlab-ci.yml' + - local: '/src/load_generator/.gitlab-ci.yml' + - local: '/src/bgpls_speaker/.gitlab-ci.yml' + - local: '/src/kpi_manager/.gitlab-ci.yml' + - local: '/src/kpi_value_api/.gitlab-ci.yml' + #- local: '/src/kpi_value_writer/.gitlab-ci.yml' + #- local: '/src/telemetry/.gitlab-ci.yml' + - local: '/src/analytics/.gitlab-ci.yml' + - local: '/src/qos_profile/.gitlab-ci.yml' + - local: '/src/vnt_manager/.gitlab-ci.yml' + - local: '/src/e2e_orchestrator/.gitlab-ci.yml' + - local: '/src/ztp_server/.gitlab-ci.yml' + - local: '/src/osm_client/.gitlab-ci.yml' + - local: '/src/simap_connector/.gitlab-ci.yml' + - local: '/src/pluggables/.gitlab-ci.yml' # This should be last one: end-to-end integration tests - local: '/src/tests/.gitlab-ci.yml' diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 7472b8f43..267d7ac23 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -14,22 +14,22 @@ # include the individual .gitlab-ci.yml of each end-to-end integration test include: -# - local: '/src/tests/ofc22/.gitlab-ci.yml' -# #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' -# - local: '/src/tests/ecoc22/.gitlab-ci.yml' -# #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' -# #- local: '/src/tests/ofc23/.gitlab-ci.yml' -# - local: '/src/tests/ofc24/.gitlab-ci.yml' -# - local: '/src/tests/eucnc24/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25/.gitlab-ci.yml' -# - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' -# - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' -# - local: '/src/tests/acl_end2end/.gitlab-ci.yml' + - local: '/src/tests/ofc22/.gitlab-ci.yml' + #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' + - local: '/src/tests/ecoc22/.gitlab-ci.yml' + #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' + #- local: '/src/tests/ofc23/.gitlab-ci.yml' + - local: '/src/tests/ofc24/.gitlab-ci.yml' + - local: 
'/src/tests/eucnc24/.gitlab-ci.yml' + #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' + #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' + #- local: '/src/tests/ofc25/.gitlab-ci.yml' + - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' + - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' + - local: '/src/tests/acl_end2end/.gitlab-ci.yml' - local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml' -# - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' -# - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' -# - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' -# - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' + - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' + - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' + - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' + - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' -- GitLab From 0daba209d0814998e37d17829b6d68a77623673e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 10:43:17 +0000 Subject: [PATCH 57/79] CI/CD pipeline: - Deactivated all tests but ofc22 that fails --- .gitlab-ci.yml | 76 ++++++++++++++++++++-------------------- src/tests/.gitlab-ci.yml | 36 +++++++++---------- 2 files changed, 56 insertions(+), 56 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 53763f5e1..6627e11cb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -28,44 +28,44 @@ workflow: # include the individual .gitlab-ci.yml of each micro-service and tests include: - #- local: '/manifests/.gitlab-ci.yml' - - local: '/src/monitoring/.gitlab-ci.yml' - - local: '/src/nbi/.gitlab-ci.yml' - - local: '/src/context/.gitlab-ci.yml' - - local: '/src/device/.gitlab-ci.yml' - - local: '/src/service/.gitlab-ci.yml' - - local: '/src/qkd_app/.gitlab-ci.yml' - - local: '/src/dbscanserving/.gitlab-ci.yml' - - local: '/src/opticalattackmitigator/.gitlab-ci.yml' - - local: '/src/opticalattackdetector/.gitlab-ci.yml' - - local: '/src/opticalattackmanager/.gitlab-ci.yml' - - local: '/src/opticalcontroller/.gitlab-ci.yml' - - local: '/src/ztp/.gitlab-ci.yml' - - local: '/src/policy/.gitlab-ci.yml' - - local: '/src/automation/.gitlab-ci.yml' - - local: '/src/forecaster/.gitlab-ci.yml' - #- local: '/src/webui/.gitlab-ci.yml' - #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' - #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' - #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' - - local: '/src/slice/.gitlab-ci.yml' - #- local: '/src/interdomain/.gitlab-ci.yml' - - local: '/src/pathcomp/.gitlab-ci.yml' - #- local: '/src/dlt/.gitlab-ci.yml' - - local: '/src/load_generator/.gitlab-ci.yml' - - local: '/src/bgpls_speaker/.gitlab-ci.yml' - - local: '/src/kpi_manager/.gitlab-ci.yml' - - local: '/src/kpi_value_api/.gitlab-ci.yml' - #- local: '/src/kpi_value_writer/.gitlab-ci.yml' - #- local: '/src/telemetry/.gitlab-ci.yml' - - local: '/src/analytics/.gitlab-ci.yml' - - local: '/src/qos_profile/.gitlab-ci.yml' - - local: '/src/vnt_manager/.gitlab-ci.yml' - - local: '/src/e2e_orchestrator/.gitlab-ci.yml' - - local: '/src/ztp_server/.gitlab-ci.yml' - - local: '/src/osm_client/.gitlab-ci.yml' - - local: '/src/simap_connector/.gitlab-ci.yml' - - local: '/src/pluggables/.gitlab-ci.yml' +# #- local: '/manifests/.gitlab-ci.yml' +# - local: '/src/monitoring/.gitlab-ci.yml' +# - local: '/src/nbi/.gitlab-ci.yml' +# - local: '/src/context/.gitlab-ci.yml' +# - local: '/src/device/.gitlab-ci.yml' +# - local: '/src/service/.gitlab-ci.yml' +# - local: 
'/src/qkd_app/.gitlab-ci.yml' +# - local: '/src/dbscanserving/.gitlab-ci.yml' +# - local: '/src/opticalattackmitigator/.gitlab-ci.yml' +# - local: '/src/opticalattackdetector/.gitlab-ci.yml' +# - local: '/src/opticalattackmanager/.gitlab-ci.yml' +# - local: '/src/opticalcontroller/.gitlab-ci.yml' +# - local: '/src/ztp/.gitlab-ci.yml' +# - local: '/src/policy/.gitlab-ci.yml' +# - local: '/src/automation/.gitlab-ci.yml' +# - local: '/src/forecaster/.gitlab-ci.yml' +# #- local: '/src/webui/.gitlab-ci.yml' +# #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' +# #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' +# #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' +# - local: '/src/slice/.gitlab-ci.yml' +# #- local: '/src/interdomain/.gitlab-ci.yml' +# - local: '/src/pathcomp/.gitlab-ci.yml' +# #- local: '/src/dlt/.gitlab-ci.yml' +# - local: '/src/load_generator/.gitlab-ci.yml' +# - local: '/src/bgpls_speaker/.gitlab-ci.yml' +# - local: '/src/kpi_manager/.gitlab-ci.yml' +# - local: '/src/kpi_value_api/.gitlab-ci.yml' +# #- local: '/src/kpi_value_writer/.gitlab-ci.yml' +# #- local: '/src/telemetry/.gitlab-ci.yml' +# - local: '/src/analytics/.gitlab-ci.yml' +# - local: '/src/qos_profile/.gitlab-ci.yml' +# - local: '/src/vnt_manager/.gitlab-ci.yml' +# - local: '/src/e2e_orchestrator/.gitlab-ci.yml' +# - local: '/src/ztp_server/.gitlab-ci.yml' +# - local: '/src/osm_client/.gitlab-ci.yml' +# - local: '/src/simap_connector/.gitlab-ci.yml' +# - local: '/src/pluggables/.gitlab-ci.yml' # This should be last one: end-to-end integration tests - local: '/src/tests/.gitlab-ci.yml' diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 267d7ac23..96f941053 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -15,21 +15,21 @@ # include the individual .gitlab-ci.yml of each end-to-end integration test include: - local: '/src/tests/ofc22/.gitlab-ci.yml' - #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' - - local: '/src/tests/ecoc22/.gitlab-ci.yml' - #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' - #- local: '/src/tests/ofc23/.gitlab-ci.yml' - - local: '/src/tests/ofc24/.gitlab-ci.yml' - - local: '/src/tests/eucnc24/.gitlab-ci.yml' - #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' - #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' - #- local: '/src/tests/ofc25/.gitlab-ci.yml' - - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' - - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' - - local: '/src/tests/acl_end2end/.gitlab-ci.yml' - - local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml' - - - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' - - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' - - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' - - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' +# #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' +# - local: '/src/tests/ecoc22/.gitlab-ci.yml' +# #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' +# #- local: '/src/tests/ofc23/.gitlab-ci.yml' +# - local: '/src/tests/ofc24/.gitlab-ci.yml' +# - local: '/src/tests/eucnc24/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25/.gitlab-ci.yml' +# - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' +# - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' +# - local: '/src/tests/acl_end2end/.gitlab-ci.yml' +# - local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml' +# +# - local: 
'/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' +# - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' +# - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' +# - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' -- GitLab From f7527c05f382dc415b4d24ca57265e903fab6241 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 10:43:41 +0000 Subject: [PATCH 58/79] End-to-end test - OFC22: - Fixed services/slices expected --- src/tests/ofc22/tests/test_functional_create_service.py | 2 +- src/tests/ofc22/tests/test_functional_delete_service.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py index 1a4dcd325..09a1afcde 100644 --- a/src/tests/ofc22/tests/test_functional_create_service.py +++ b/src/tests/ofc22/tests/test_functional_create_service.py @@ -49,7 +49,7 @@ def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # # Ensure slices and services are created response = context_client.ListSlices(ADMIN_CONTEXT_ID) LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) - assert len(response.slices) == 1 # OSM slice + assert len(response.slices) == 0 # no slice should be created response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py index 88677b0fe..cdc3894b3 100644 --- a/src/tests/ofc22/tests/test_functional_delete_service.py +++ b/src/tests/ofc22/tests/test_functional_delete_service.py @@ -33,7 +33,7 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p # Ensure slices and services are created response = context_client.ListSlices(ADMIN_CONTEXT_ID) LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) - assert len(response.slices) == 1 # OSM slice + assert len(response.slices) == 0 # no slice should be created response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) -- GitLab From d7cececcedf05e3eaffd26e80f682006fef021cc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 12:24:40 +0000 Subject: [PATCH 59/79] NBI component - IETF L2VPN connector: - Hacked IETF L2VPN to create L3 service for OSM as needed, and L2 otherwise. 
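In short, the handler now keys the created TFS service type off the vpn-service customer-name. A minimal, self-contained sketch of that selection rule follows (the helper name select_service_type is illustrative only; the diff below inlines the same check in process_vpn_service and passes the result to create_service):

    from common.proto.context_pb2 import ServiceTypeEnum

    def select_service_type(vpn_service: dict) -> int:
        # OSM-originated vpn-services (customer-name == "osm") are rendered as L3NM;
        # any other customer keeps the regular L2NM service type.
        customer_name = vpn_service.get('customer-name')
        if isinstance(customer_name, str) and customer_name.strip().lower() == 'osm':
            return ServiceTypeEnum.SERVICETYPE_L3NM
        return ServiceTypeEnum.SERVICETYPE_L2NM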
--- src/nbi/service/ietf_l2vpn/Handlers.py | 13 ++++++++++--- src/nbi/service/ietf_l2vpn/L2VPN_Service.py | 10 ++++++++-- .../service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 6 +++++- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/Handlers.py b/src/nbi/service/ietf_l2vpn/Handlers.py index 81e5be981..9fe7e2a0a 100644 --- a/src/nbi/service/ietf_l2vpn/Handlers.py +++ b/src/nbi/service/ietf_l2vpn/Handlers.py @@ -35,13 +35,15 @@ from .Constants import ( LOGGER = logging.getLogger(__name__) def create_service( - service_uuid : str, context_uuid : Optional[str] = DEFAULT_CONTEXT_NAME + service_uuid : str, + service_type : ServiceTypeEnum = ServiceTypeEnum.SERVICETYPE_L2NM, + context_uuid : Optional[str] = DEFAULT_CONTEXT_NAME, ) -> Optional[Exception]: # pylint: disable=no-member service_request = Service() service_request.service_id.context_id.context_uuid.uuid = context_uuid service_request.service_id.service_uuid.uuid = service_uuid - service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM + service_request.service_type = service_type service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED try: @@ -56,7 +58,12 @@ def process_vpn_service( vpn_service : Dict, errors : List[Dict] ) -> None: vpn_id = vpn_service['vpn-id'] - exc = create_service(vpn_id) + customer_name = vpn_service.get('customer-name') + if isinstance(customer_name, str) and customer_name.strip().lower() == 'osm': + service_type = ServiceTypeEnum.SERVICETYPE_L3NM + else: + service_type = ServiceTypeEnum.SERVICETYPE_L2NM + exc = create_service(vpn_id, service_type=service_type) if exc is not None: errors.append({'error': str(exc)}) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_Service.py b/src/nbi/service/ietf_l2vpn/L2VPN_Service.py index 13d46418c..31354ae32 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_Service.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_Service.py @@ -44,7 +44,10 @@ class L2VPN_Service(Resource): if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) - if target.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: + if target.service_type not in ( + ServiceTypeEnum.SERVICETYPE_L2NM, + ServiceTypeEnum.SERVICETYPE_L3NM, + ): raise Exception('VPN({:s}) is not L2VPN'.format(str(vpn_id))) service_ids = {target.service_id.service_uuid.uuid, target.name} # pylint: disable=no-member @@ -72,7 +75,10 @@ class L2VPN_Service(Resource): target = get_service_by_uuid(context_client, vpn_id) if target is None: LOGGER.warning('VPN({:s}) not found in database. 
Nothing done.'.format(str(vpn_id))) - elif target.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: + elif target.service_type not in ( + ServiceTypeEnum.SERVICETYPE_L2NM, + ServiceTypeEnum.SERVICETYPE_L3NM, + ): raise Exception('VPN({:s}) is not L2VPN'.format(str(vpn_id))) else: service_ids = {target.service_id.service_uuid.uuid, target.name} # pylint: disable=no-member diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 782fddf44..bd9706f31 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -123,7 +123,11 @@ class L2VPN_SiteNetworkAccesses(Resource): context_client = ContextClient() vpn_services = list() for service in get_services(context_client): - if service.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: continue + if service.service_type not in ( + ServiceTypeEnum.SERVICETYPE_L2NM, + ServiceTypeEnum.SERVICETYPE_L3NM, + ): + continue # De-duplicate services uuid/names in case service_uuid == service_name vpn_ids = {service.service_id.service_uuid.uuid, service.name} -- GitLab From 0ca2211229c6db9661bd6bb2d39be7e987c785cc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 12:30:49 +0000 Subject: [PATCH 60/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed IETF create/delete services for OSM hack --- src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py | 4 ++-- src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py index ed02f7c62..d6eb1b76e 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py @@ -31,7 +31,7 @@ LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) - +EXPECTED_SERVICE_TYPES = (ServiceTypeEnum.SERVICETYPE_L2NM, ServiceTypeEnum.SERVICETYPE_L3NM) # pylint: disable=redefined-outer-name, unused-argument def test_service_ietf_creation( @@ -67,7 +67,7 @@ def test_service_ietf_creation( service_id = service.service_id assert service_id.service_uuid.uuid == service_uuid assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE - assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM + assert service.service_type in EXPECTED_SERVICE_TYPES response = context_client.ListConnections(service_id) LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py index 32d78b39d..43b4c48cf 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py @@ -31,6 +31,7 @@ LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) +EXPECTED_SERVICE_TYPES = (ServiceTypeEnum.SERVICETYPE_L2NM, ServiceTypeEnum.SERVICETYPE_L3NM) # pylint: disable=redefined-outer-name, unused-argument @@ -60,7 +61,7 @@ def test_service_ietf_removal( for service in response.services: service_id = service.service_id assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE - assert service.service_type == 
ServiceTypeEnum.SERVICETYPE_L2NM + assert service.service_type == EXPECTED_SERVICE_TYPES response = context_client.ListConnections(service_id) LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( -- GitLab From fbf11000d5b9de220d7366bb932670eb688c6714 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 12:31:05 +0000 Subject: [PATCH 61/79] CI/CD pipeline: - Reactivated all tests --- .gitlab-ci.yml | 76 ++++++++++++++++++++-------------------- src/tests/.gitlab-ci.yml | 36 +++++++++---------- 2 files changed, 56 insertions(+), 56 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6627e11cb..53763f5e1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -28,44 +28,44 @@ workflow: # include the individual .gitlab-ci.yml of each micro-service and tests include: -# #- local: '/manifests/.gitlab-ci.yml' -# - local: '/src/monitoring/.gitlab-ci.yml' -# - local: '/src/nbi/.gitlab-ci.yml' -# - local: '/src/context/.gitlab-ci.yml' -# - local: '/src/device/.gitlab-ci.yml' -# - local: '/src/service/.gitlab-ci.yml' -# - local: '/src/qkd_app/.gitlab-ci.yml' -# - local: '/src/dbscanserving/.gitlab-ci.yml' -# - local: '/src/opticalattackmitigator/.gitlab-ci.yml' -# - local: '/src/opticalattackdetector/.gitlab-ci.yml' -# - local: '/src/opticalattackmanager/.gitlab-ci.yml' -# - local: '/src/opticalcontroller/.gitlab-ci.yml' -# - local: '/src/ztp/.gitlab-ci.yml' -# - local: '/src/policy/.gitlab-ci.yml' -# - local: '/src/automation/.gitlab-ci.yml' -# - local: '/src/forecaster/.gitlab-ci.yml' -# #- local: '/src/webui/.gitlab-ci.yml' -# #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' -# #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' -# #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' -# - local: '/src/slice/.gitlab-ci.yml' -# #- local: '/src/interdomain/.gitlab-ci.yml' -# - local: '/src/pathcomp/.gitlab-ci.yml' -# #- local: '/src/dlt/.gitlab-ci.yml' -# - local: '/src/load_generator/.gitlab-ci.yml' -# - local: '/src/bgpls_speaker/.gitlab-ci.yml' -# - local: '/src/kpi_manager/.gitlab-ci.yml' -# - local: '/src/kpi_value_api/.gitlab-ci.yml' -# #- local: '/src/kpi_value_writer/.gitlab-ci.yml' -# #- local: '/src/telemetry/.gitlab-ci.yml' -# - local: '/src/analytics/.gitlab-ci.yml' -# - local: '/src/qos_profile/.gitlab-ci.yml' -# - local: '/src/vnt_manager/.gitlab-ci.yml' -# - local: '/src/e2e_orchestrator/.gitlab-ci.yml' -# - local: '/src/ztp_server/.gitlab-ci.yml' -# - local: '/src/osm_client/.gitlab-ci.yml' -# - local: '/src/simap_connector/.gitlab-ci.yml' -# - local: '/src/pluggables/.gitlab-ci.yml' + #- local: '/manifests/.gitlab-ci.yml' + - local: '/src/monitoring/.gitlab-ci.yml' + - local: '/src/nbi/.gitlab-ci.yml' + - local: '/src/context/.gitlab-ci.yml' + - local: '/src/device/.gitlab-ci.yml' + - local: '/src/service/.gitlab-ci.yml' + - local: '/src/qkd_app/.gitlab-ci.yml' + - local: '/src/dbscanserving/.gitlab-ci.yml' + - local: '/src/opticalattackmitigator/.gitlab-ci.yml' + - local: '/src/opticalattackdetector/.gitlab-ci.yml' + - local: '/src/opticalattackmanager/.gitlab-ci.yml' + - local: '/src/opticalcontroller/.gitlab-ci.yml' + - local: '/src/ztp/.gitlab-ci.yml' + - local: '/src/policy/.gitlab-ci.yml' + - local: '/src/automation/.gitlab-ci.yml' + - local: '/src/forecaster/.gitlab-ci.yml' + #- local: '/src/webui/.gitlab-ci.yml' + #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' + #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' + #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' + - local: 
'/src/slice/.gitlab-ci.yml' + #- local: '/src/interdomain/.gitlab-ci.yml' + - local: '/src/pathcomp/.gitlab-ci.yml' + #- local: '/src/dlt/.gitlab-ci.yml' + - local: '/src/load_generator/.gitlab-ci.yml' + - local: '/src/bgpls_speaker/.gitlab-ci.yml' + - local: '/src/kpi_manager/.gitlab-ci.yml' + - local: '/src/kpi_value_api/.gitlab-ci.yml' + #- local: '/src/kpi_value_writer/.gitlab-ci.yml' + #- local: '/src/telemetry/.gitlab-ci.yml' + - local: '/src/analytics/.gitlab-ci.yml' + - local: '/src/qos_profile/.gitlab-ci.yml' + - local: '/src/vnt_manager/.gitlab-ci.yml' + - local: '/src/e2e_orchestrator/.gitlab-ci.yml' + - local: '/src/ztp_server/.gitlab-ci.yml' + - local: '/src/osm_client/.gitlab-ci.yml' + - local: '/src/simap_connector/.gitlab-ci.yml' + - local: '/src/pluggables/.gitlab-ci.yml' # This should be last one: end-to-end integration tests - local: '/src/tests/.gitlab-ci.yml' diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 96f941053..267d7ac23 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -15,21 +15,21 @@ # include the individual .gitlab-ci.yml of each end-to-end integration test include: - local: '/src/tests/ofc22/.gitlab-ci.yml' -# #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' -# - local: '/src/tests/ecoc22/.gitlab-ci.yml' -# #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' -# #- local: '/src/tests/ofc23/.gitlab-ci.yml' -# - local: '/src/tests/ofc24/.gitlab-ci.yml' -# - local: '/src/tests/eucnc24/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25/.gitlab-ci.yml' -# - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' -# - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' -# - local: '/src/tests/acl_end2end/.gitlab-ci.yml' -# - local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml' -# -# - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' -# - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' -# - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' -# - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' + #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' + - local: '/src/tests/ecoc22/.gitlab-ci.yml' + #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' + #- local: '/src/tests/ofc23/.gitlab-ci.yml' + - local: '/src/tests/ofc24/.gitlab-ci.yml' + - local: '/src/tests/eucnc24/.gitlab-ci.yml' + #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' + #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' + #- local: '/src/tests/ofc25/.gitlab-ci.yml' + - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' + - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' + - local: '/src/tests/acl_end2end/.gitlab-ci.yml' + - local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml' + + - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' + - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' + - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' + - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' -- GitLab From f00b014fd09d57209452244df07f47a311c53205 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 15:58:22 +0000 Subject: [PATCH 62/79] Service component: - Fixed logging in method TaskExecutor::get_device_type_drivers_for_connection() --- src/service/service/task_scheduler/TaskExecutor.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/service/service/task_scheduler/TaskExecutor.py 
b/src/service/service/task_scheduler/TaskExecutor.py index 9f08788ef..e2709d9bc 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -382,7 +382,17 @@ class TaskExecutor: controller_uuid = controller.device_id.device_uuid.uuid devices.setdefault(device_type, dict())[controller_uuid] = (controller, controller_drivers) - LOGGER.debug('[get_devices_from_connection] devices = {:s}'.format(str(devices))) + plain_devices = { + device_type : { + device_uuid : { + 'grpc_object' : grpc_message_to_json_string(device_grpc), + 'device_drivers' : list(device_drivers) + } + for device_uuid, (device_grpc, device_drivers) in device_dict.items() + } + for device_type, device_dict in devices.items() + } + LOGGER.debug('[get_device_type_drivers_for_connection] devices = {:s}'.format(str(plain_devices))) return devices -- GitLab From fbfb96f288dd227806ff8dc1fc0a861c4fb4d4ae Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 15:59:07 +0000 Subject: [PATCH 63/79] Test - Tools - Mock OSM: - Fixed wim_mapping() method to accept a bearer_prefix --- src/tests/tools/mock_osm/Tools.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/tests/tools/mock_osm/Tools.py b/src/tests/tools/mock_osm/Tools.py index 56b0c11d2..4ddc7974d 100644 --- a/src/tests/tools/mock_osm/Tools.py +++ b/src/tests/tools/mock_osm/Tools.py @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, Optional +from typing import Dict, Optional, Tuple def compose_service_endpoint_id(site_id : str, endpoint_id : Dict): device_uuid = endpoint_id['device_id']['device_uuid']['uuid'] endpoint_uuid = endpoint_id['endpoint_uuid']['uuid'] return ':'.join([site_id, device_uuid, endpoint_uuid]) -def wim_mapping(site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, priority=None, redundant=[]): +def wim_mapping( + site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, + bearer_prefix : Optional[str] = None, priority=None, redundant=[] +) -> Tuple[str, Dict]: ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid'] ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid'] service_endpoint_id = compose_service_endpoint_id(site_id, ce_endpoint_id) @@ -28,6 +31,8 @@ def wim_mapping(site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, p else: pe_device_uuid = pe_device_id['device_uuid']['uuid'] bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid) + if bearer_prefix is not None: + bearer = '{:s}:{:s}'.format(bearer_prefix, bearer) mapping = { 'service_endpoint_id': service_endpoint_id, 'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid, -- GitLab From e4f86dcbb3f27751fd42ac6ce9020bb1fca1b93e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 16:53:14 +0000 Subject: [PATCH 64/79] End-to-end test - L2 VPN gNMI OpenConfig: - Added raise to capture generated l2vpn request --- src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py index d6eb1b76e..76b980c87 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py @@ -75,3 +75,5 @@ def test_service_ietf_creation( grpc_message_to_json_string(response) )) assert 
len(response.connections) == 1 + + raise Exception() -- GitLab From c86629eae4e1203ef0fca61b8cb2c7138a0e32fb Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 16:54:01 +0000 Subject: [PATCH 65/79] CI/CD pipeline: - Deactivated other tests --- .gitlab-ci.yml | 76 ++++++++++++++++++++-------------------- src/tests/.gitlab-ci.yml | 34 +++++++++--------- 2 files changed, 55 insertions(+), 55 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 53763f5e1..6627e11cb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -28,44 +28,44 @@ workflow: # include the individual .gitlab-ci.yml of each micro-service and tests include: - #- local: '/manifests/.gitlab-ci.yml' - - local: '/src/monitoring/.gitlab-ci.yml' - - local: '/src/nbi/.gitlab-ci.yml' - - local: '/src/context/.gitlab-ci.yml' - - local: '/src/device/.gitlab-ci.yml' - - local: '/src/service/.gitlab-ci.yml' - - local: '/src/qkd_app/.gitlab-ci.yml' - - local: '/src/dbscanserving/.gitlab-ci.yml' - - local: '/src/opticalattackmitigator/.gitlab-ci.yml' - - local: '/src/opticalattackdetector/.gitlab-ci.yml' - - local: '/src/opticalattackmanager/.gitlab-ci.yml' - - local: '/src/opticalcontroller/.gitlab-ci.yml' - - local: '/src/ztp/.gitlab-ci.yml' - - local: '/src/policy/.gitlab-ci.yml' - - local: '/src/automation/.gitlab-ci.yml' - - local: '/src/forecaster/.gitlab-ci.yml' - #- local: '/src/webui/.gitlab-ci.yml' - #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' - #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' - #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' - - local: '/src/slice/.gitlab-ci.yml' - #- local: '/src/interdomain/.gitlab-ci.yml' - - local: '/src/pathcomp/.gitlab-ci.yml' - #- local: '/src/dlt/.gitlab-ci.yml' - - local: '/src/load_generator/.gitlab-ci.yml' - - local: '/src/bgpls_speaker/.gitlab-ci.yml' - - local: '/src/kpi_manager/.gitlab-ci.yml' - - local: '/src/kpi_value_api/.gitlab-ci.yml' - #- local: '/src/kpi_value_writer/.gitlab-ci.yml' - #- local: '/src/telemetry/.gitlab-ci.yml' - - local: '/src/analytics/.gitlab-ci.yml' - - local: '/src/qos_profile/.gitlab-ci.yml' - - local: '/src/vnt_manager/.gitlab-ci.yml' - - local: '/src/e2e_orchestrator/.gitlab-ci.yml' - - local: '/src/ztp_server/.gitlab-ci.yml' - - local: '/src/osm_client/.gitlab-ci.yml' - - local: '/src/simap_connector/.gitlab-ci.yml' - - local: '/src/pluggables/.gitlab-ci.yml' +# #- local: '/manifests/.gitlab-ci.yml' +# - local: '/src/monitoring/.gitlab-ci.yml' +# - local: '/src/nbi/.gitlab-ci.yml' +# - local: '/src/context/.gitlab-ci.yml' +# - local: '/src/device/.gitlab-ci.yml' +# - local: '/src/service/.gitlab-ci.yml' +# - local: '/src/qkd_app/.gitlab-ci.yml' +# - local: '/src/dbscanserving/.gitlab-ci.yml' +# - local: '/src/opticalattackmitigator/.gitlab-ci.yml' +# - local: '/src/opticalattackdetector/.gitlab-ci.yml' +# - local: '/src/opticalattackmanager/.gitlab-ci.yml' +# - local: '/src/opticalcontroller/.gitlab-ci.yml' +# - local: '/src/ztp/.gitlab-ci.yml' +# - local: '/src/policy/.gitlab-ci.yml' +# - local: '/src/automation/.gitlab-ci.yml' +# - local: '/src/forecaster/.gitlab-ci.yml' +# #- local: '/src/webui/.gitlab-ci.yml' +# #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' +# #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' +# #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' +# - local: '/src/slice/.gitlab-ci.yml' +# #- local: '/src/interdomain/.gitlab-ci.yml' +# - local: '/src/pathcomp/.gitlab-ci.yml' +# #- local: '/src/dlt/.gitlab-ci.yml' +# - local: 
'/src/load_generator/.gitlab-ci.yml' +# - local: '/src/bgpls_speaker/.gitlab-ci.yml' +# - local: '/src/kpi_manager/.gitlab-ci.yml' +# - local: '/src/kpi_value_api/.gitlab-ci.yml' +# #- local: '/src/kpi_value_writer/.gitlab-ci.yml' +# #- local: '/src/telemetry/.gitlab-ci.yml' +# - local: '/src/analytics/.gitlab-ci.yml' +# - local: '/src/qos_profile/.gitlab-ci.yml' +# - local: '/src/vnt_manager/.gitlab-ci.yml' +# - local: '/src/e2e_orchestrator/.gitlab-ci.yml' +# - local: '/src/ztp_server/.gitlab-ci.yml' +# - local: '/src/osm_client/.gitlab-ci.yml' +# - local: '/src/simap_connector/.gitlab-ci.yml' +# - local: '/src/pluggables/.gitlab-ci.yml' # This should be last one: end-to-end integration tests - local: '/src/tests/.gitlab-ci.yml' diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 267d7ac23..7472b8f43 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -14,22 +14,22 @@ # include the individual .gitlab-ci.yml of each end-to-end integration test include: - - local: '/src/tests/ofc22/.gitlab-ci.yml' - #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' - - local: '/src/tests/ecoc22/.gitlab-ci.yml' - #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' - #- local: '/src/tests/ofc23/.gitlab-ci.yml' - - local: '/src/tests/ofc24/.gitlab-ci.yml' - - local: '/src/tests/eucnc24/.gitlab-ci.yml' - #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' - #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' - #- local: '/src/tests/ofc25/.gitlab-ci.yml' - - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' - - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' - - local: '/src/tests/acl_end2end/.gitlab-ci.yml' +# - local: '/src/tests/ofc22/.gitlab-ci.yml' +# #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' +# - local: '/src/tests/ecoc22/.gitlab-ci.yml' +# #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' +# #- local: '/src/tests/ofc23/.gitlab-ci.yml' +# - local: '/src/tests/ofc24/.gitlab-ci.yml' +# - local: '/src/tests/eucnc24/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25/.gitlab-ci.yml' +# - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' +# - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' +# - local: '/src/tests/acl_end2end/.gitlab-ci.yml' - local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml' - - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' - - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' - - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' - - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' +# - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' +# - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' +# - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' +# - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' -- GitLab From 70e32f4bf8dc7632378717a77ef97ab4280e9e65 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 16:55:17 +0000 Subject: [PATCH 66/79] End-to-end test - L2 VPN gNMI OpenConfig: - Disabled TFS descriptor based service test --- src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml | 78 ++++++++++++------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml index c3e175002..38a473f6c 100644 --- a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml +++ b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml @@ -214,45 +214,45 @@ end2end_test l2_vpn_gnmi_oc: - 
ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" - # Run end-to-end test: configure service TFS - - > - docker run -t --rm --name ${TEST_NAME} --network=host - --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" - --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" - $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh - - # Give time to routers for being configured and stabilized - - sleep 60 - - # Dump configuration of the routers (after configure TFS service) - - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - # Run end-to-end test: test connectivity with ping - - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" - - # Run end-to-end test: deconfigure service TFS - - > - docker run -t --rm --name ${TEST_NAME} --network=host - --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" - --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" - $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh - - # Give time to routers for being configured and stabilized - - sleep 60 - - # Dump configuration of the routers (after deconfigure TFS service) - - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - # Run end-to-end test: test no connectivity with ping - - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" - - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" - - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss" +# # Run end-to-end test: configure service TFS +# - > +# docker run -t --rm --name ${TEST_NAME} --network=host +# --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" +# --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" +# $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh +# +# # Give time to routers for being configured and stabilized +# - sleep 60 +# +# # Dump configuration of the routers (after configure TFS service) +# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# +# # Run end-to-end test: test connectivity with ping +# - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" +# - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" +# - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" +# +# # Run end-to-end test: deconfigure service TFS +# - > +# docker run -t --rm --name ${TEST_NAME} --network=host +# --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" +# --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" +# $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh +# +# # Give time to routers for being configured and stabilized +# - sleep 60 +# +# # Dump configuration of the routers (after deconfigure TFS service) +# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# +# # Run end-to-end test: test no connectivity with ping +# - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" +# - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" +# - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss" # Run end-to-end test: configure service IETF - > -- GitLab From 0328f8c73608998e5a4aed88d2a699180d9f1c71 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 17:39:22 +0000 Subject: [PATCH 67/79] End-to-end test - L2 VPN gNMI OpenConfig: - Updated IETF L2VPN test based on static JSON payload --- .../data/ietf-l2vpn-service.json | 70 +++ src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py | 15 - src/tests/l2_vpn_gnmi_oc/tests/MockOSM.py | 62 -- .../l2_vpn_gnmi_oc/tests/OSM_Constants.py | 53 -- src/tests/l2_vpn_gnmi_oc/tests/Tools.py | 109 ++++ .../tests/WimconnectorIETFL2VPN.py | 545 ------------------ .../l2_vpn_gnmi_oc/tests/acknowledgements.txt | 3 - src/tests/l2_vpn_gnmi_oc/tests/sdnconn.py | 242 -------- .../tests/test_service_ietf_create.py | 43 +- .../tests/test_service_ietf_remove.py | 20 +- 10 files changed, 204 insertions(+), 958 deletions(-) create mode 100644 src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json delete mode 100644 src/tests/l2_vpn_gnmi_oc/tests/MockOSM.py delete mode 100644 src/tests/l2_vpn_gnmi_oc/tests/OSM_Constants.py create mode 100644 src/tests/l2_vpn_gnmi_oc/tests/Tools.py delete mode 100644 src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py delete mode 100644 src/tests/l2_vpn_gnmi_oc/tests/acknowledgements.txt delete mode 100644 src/tests/l2_vpn_gnmi_oc/tests/sdnconn.py diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json new file mode 100644 index 000000000..14185a8c1 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json @@ -0,0 +1,70 @@ +{ + "ietf-l2vpn-svc:l2vpn-svc": { + "vpn-services": {"vpn-service": [ + { + "vpn-id": "ietf-l2vpn-svc", + "vpn-svc-type": "vpws", + "svc-topo": "any-to-any", + "customer-name": "somebody" + } + ]}, + "sites": { + "site": [ + { + "site-id": "site_DC1", + "management": {"type": "ietf-l2vpn-svc:provider-managed"}, + "locations": {"location": [{"location-id": "DC1"}]}, + "devices": {"device": [{"device-id": "dc1", "location": "DC1"}]}, + "site-network-accesses": { + "site-network-access": [ + { + "network-access-id": "eth1", + "type": "ietf-l3vpn-svc:multipoint", + "device-reference": "dc1", + "vpn-attachment": { + "vpn-id": "ietf-l2vpn-svc", + "site-role": "ietf-l2vpn-svc:any-to-any-role" + }, + "bearer": {"bearer-reference": "r1:Ethernet10"}, + "connection": { + "encapsulation-type": "vlan", + "tagged-interface": { + "type": "ietf-l2vpn-svc:dot1q", + "dot1q-vlan-tagged": {"cvlan-id": 125} + } + } + } + ] + } + }, + { + "site-id": "site_DC2", + "management": {"type": "ietf-l2vpn-svc:provider-managed"}, + "locations": {"location": [{"location-id": "DC2"}]}, + "devices": {"device": [{"device-id": "dc2", "location": "DC2"}]}, + "site-network-accesses": { + "site-network-access": [ + { + "network-access-id": "eth1", + "type": "ietf-l3vpn-svc:multipoint", + "device-reference": "dc2", + "vpn-attachment": { + "vpn-id": "ietf-l2vpn-svc", + "site-role": "ietf-l2vpn-svc:any-to-any-role" + }, + "bearer": {"bearer-reference": "r3:Ethernet10"}, + "connection": { + "encapsulation-type": "vlan", + "tagged-interface": { + "type": "ietf-l2vpn-svc:dot1q", + "dot1q-vlan-tagged": {"cvlan-id": 125} + } + } + } + ] + } + } + ] + } + } +} diff --git a/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py b/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py index fae8401bb..aa37459a1 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py @@ -13,24 +13,9 @@ # limitations under the License. 
import pytest -from common.Constants import ServiceNameEnum -from common.Settings import get_service_host, get_service_port_http from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from service.client.ServiceClient import ServiceClient -from .MockOSM import MockOSM -from .OSM_Constants import WIM_MAPPING - -NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI) -NBI_PORT = get_service_port_http(ServiceNameEnum.NBI) -NBI_USERNAME = 'admin' -NBI_PASSWORD = 'admin' -NBI_BASE_URL = '' - -@pytest.fixture(scope='session') -def osm_wim() -> MockOSM: - wim_url = 'http://{:s}:{:d}'.format(NBI_ADDRESS, NBI_PORT) - return MockOSM(wim_url, WIM_MAPPING, NBI_USERNAME, NBI_PASSWORD) @pytest.fixture(scope='session') def context_client() -> ContextClient: diff --git a/src/tests/l2_vpn_gnmi_oc/tests/MockOSM.py b/src/tests/l2_vpn_gnmi_oc/tests/MockOSM.py deleted file mode 100644 index 2361b44b6..000000000 --- a/src/tests/l2_vpn_gnmi_oc/tests/MockOSM.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from .WimconnectorIETFL2VPN import WimconnectorIETFL2VPN - -LOGGER = logging.getLogger(__name__) - -class MockOSM: - def __init__(self, url, mapping, username, password): - wim = {'wim_url': url} - wim_account = {'user': username, 'password': password} - config = {'mapping_not_needed': False, 'service_endpoint_mapping': mapping} - self.wim = WimconnectorIETFL2VPN(wim, wim_account, config=config) - self.conn_info = {} # internal database emulating OSM storage provided to WIM Connectors - - def create_connectivity_service(self, service_type, connection_points): - LOGGER.info('[create_connectivity_service] service_type={:s}'.format(str(service_type))) - LOGGER.info('[create_connectivity_service] connection_points={:s}'.format(str(connection_points))) - self.wim.check_credentials() - result = self.wim.create_connectivity_service(service_type, connection_points) - LOGGER.info('[create_connectivity_service] result={:s}'.format(str(result))) - service_uuid, conn_info = result - self.conn_info[service_uuid] = conn_info - return service_uuid - - def get_connectivity_service_status(self, service_uuid): - LOGGER.info('[get_connectivity_service] service_uuid={:s}'.format(str(service_uuid))) - conn_info = self.conn_info.get(service_uuid) - if conn_info is None: raise Exception('ServiceId({:s}) not found'.format(str(service_uuid))) - LOGGER.info('[get_connectivity_service] conn_info={:s}'.format(str(conn_info))) - self.wim.check_credentials() - result = self.wim.get_connectivity_service_status(service_uuid, conn_info=conn_info) - LOGGER.info('[get_connectivity_service] result={:s}'.format(str(result))) - return result - - def edit_connectivity_service(self, service_uuid, connection_points): - LOGGER.info('[edit_connectivity_service] service_uuid={:s}'.format(str(service_uuid))) - LOGGER.info('[edit_connectivity_service] 
connection_points={:s}'.format(str(connection_points))) - conn_info = self.conn_info.get(service_uuid) - if conn_info is None: raise Exception('ServiceId({:s}) not found'.format(str(service_uuid))) - LOGGER.info('[edit_connectivity_service] conn_info={:s}'.format(str(conn_info))) - self.wim.edit_connectivity_service(service_uuid, conn_info=conn_info, connection_points=connection_points) - - def delete_connectivity_service(self, service_uuid): - LOGGER.info('[delete_connectivity_service] service_uuid={:s}'.format(str(service_uuid))) - conn_info = self.conn_info.get(service_uuid) - if conn_info is None: raise Exception('ServiceId({:s}) not found'.format(str(service_uuid))) - LOGGER.info('[delete_connectivity_service] conn_info={:s}'.format(str(conn_info))) - self.wim.check_credentials() - self.wim.delete_connectivity_service(service_uuid, conn_info=conn_info) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/OSM_Constants.py b/src/tests/l2_vpn_gnmi_oc/tests/OSM_Constants.py deleted file mode 100644 index 13cb57bf2..000000000 --- a/src/tests/l2_vpn_gnmi_oc/tests/OSM_Constants.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Ref: https://osm.etsi.org/wikipub/index.php/WIM -WIM_MAPPING = [ - { - 'device-id' : 'dc1', # pop_switch_dpid - #'device_interface_id' : ??, # pop_switch_port - 'service_endpoint_id' : 'ep-1', # wan_service_endpoint_id - 'service_mapping_info': { # wan_service_mapping_info, other extra info - 'bearer': {'bearer-reference': 'r1:Ethernet10'}, - 'site-id': '1', - }, - #'switch_dpid' : ??, # wan_switch_dpid - #'switch_port' : ??, # wan_switch_port - #'datacenter_id' : ??, # vim_account - }, - { - 'device-id' : 'dc2', # pop_switch_dpid - #'device_interface_id' : ??, # pop_switch_port - 'service_endpoint_id' : 'ep-2', # wan_service_endpoint_id - 'service_mapping_info': { # wan_service_mapping_info, other extra info - 'bearer': {'bearer-reference': 'r3:Ethernet10'}, - 'site-id': '2', - }, - #'switch_dpid' : ??, # wan_switch_dpid - #'switch_port' : ??, # wan_switch_port - #'datacenter_id' : ??, # vim_account - }, -] - -SERVICE_TYPE = 'ELINE' - -SERVICE_CONNECTION_POINTS = [ - {'service_endpoint_id': 'ep-1', - 'service_endpoint_encapsulation_type': 'dot1q', - 'service_endpoint_encapsulation_info': {'vlan': 125}}, - {'service_endpoint_id': 'ep-2', - 'service_endpoint_encapsulation_type': 'dot1q', - 'service_endpoint_encapsulation_info': {'vlan': 125}}, -] diff --git a/src/tests/l2_vpn_gnmi_oc/tests/Tools.py b/src/tests/l2_vpn_gnmi_oc/tests/Tools.py new file mode 100644 index 000000000..bbee845cd --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/Tools.py @@ -0,0 +1,109 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, logging, requests +from typing import Any, Dict, List, Optional, Set, Union +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_http + +NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI) +NBI_PORT = get_service_port_http(ServiceNameEnum.NBI) +NBI_USERNAME = 'admin' +NBI_PASSWORD = 'admin' +NBI_BASE_URL = '' + +class RestRequestMethod(enum.Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + PATCH = 'patch' + DELETE = 'delete' + +EXPECTED_STATUS_CODES : Set[int] = { + requests.codes['OK' ], + requests.codes['CREATED' ], + requests.codes['ACCEPTED' ], + requests.codes['NO_CONTENT'], +} + +def do_rest_request( + method : RestRequestMethod, url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + request_url = 'http://{:s}:{:s}@{:s}:{:d}{:s}{:s}'.format( + NBI_USERNAME, NBI_PASSWORD, NBI_ADDRESS, NBI_PORT, str(NBI_BASE_URL), url + ) + + if logger is not None: + msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) + if body is not None: msg += ' body={:s}'.format(str(body)) + logger.warning(msg) + reply = requests.request(method.value, request_url, timeout=timeout, json=body, allow_redirects=allow_redirects) + if logger is not None: + logger.warning('Reply: {:s}'.format(str(reply.text))) + assert reply.status_code in expected_status_codes, 'Reply failed with status code {:d}'.format(reply.status_code) + + if reply.content and len(reply.content) > 0: return reply.json() + return None + +def do_rest_get_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.GET, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_post_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.POST, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_put_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.PUT, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_patch_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, 
expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.PATCH, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_delete_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.DELETE, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py b/src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py deleted file mode 100644 index de940a7d2..000000000 --- a/src/tests/l2_vpn_gnmi_oc/tests/WimconnectorIETFL2VPN.py +++ /dev/null @@ -1,545 +0,0 @@ -# -*- coding: utf-8 -*- -## -# Copyright 2018 Telefonica -# All Rights Reserved. -# -# Contributors: Oscar Gonzalez de Dios, Manuel Lopez Bravo, Guillermo Pajares Martin -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This work has been performed in the context of the Metro-Haul project - -# funded by the European Commission under Grant number 761727 through the -# Horizon 2020 program. -## -"""The SDN/WIM connector is responsible for establishing wide area network -connectivity. - -This SDN/WIM connector implements the standard IETF RFC 8466 "A YANG Data - Model for Layer 2 Virtual Private Network (L2VPN) Service Delivery" - -It receives the endpoints and the necessary details to request -the Layer 2 service. 
-""" -import requests -import uuid -import logging -import copy -#from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError -from .sdnconn import SdnConnectorBase, SdnConnectorError - -"""Check layer where we move it""" - - -class WimconnectorIETFL2VPN(SdnConnectorBase): - def __init__(self, wim, wim_account, config=None, logger=None): - """IETF L2VPN WIM connector - - Arguments: (To be completed) - wim (dict): WIM record, as stored in the database - wim_account (dict): WIM account record, as stored in the database - """ - self.logger = logging.getLogger("ro.sdn.ietfl2vpn") - super().__init__(wim, wim_account, config, logger) - self.headers = {"Content-Type": "application/json"} - self.mappings = { - m["service_endpoint_id"]: m for m in self.service_endpoint_mapping - } - self.user = wim_account.get("user") - self.passwd = wim_account.get("password") # replace "passwordd" -> "password" - - if self.user and self.passwd is not None: - self.auth = (self.user, self.passwd) - else: - self.auth = None - - self.logger.info("IETFL2VPN Connector Initialized.") - - def check_credentials(self): - endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( - self.wim["wim_url"] - ) - - try: - response = requests.get(endpoint, auth=self.auth) - http_code = response.status_code - except requests.exceptions.RequestException as e: - raise SdnConnectorError(e.response, http_code=503) - - if http_code != 200: - raise SdnConnectorError("Failed while authenticating", http_code=http_code) - - self.logger.info("Credentials checked") - - def get_connectivity_service_status(self, service_uuid, conn_info=None): - """Monitor the status of the connectivity service stablished - - Arguments: - service_uuid: Connectivity service unique identifier - - Returns: - Examples:: - {'sdn_status': 'ACTIVE'} - {'sdn_status': 'INACTIVE'} - {'sdn_status': 'DOWN'} - {'sdn_status': 'ERROR'} - """ - try: - self.logger.info("Sending get connectivity service stuatus") - servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format( - self.wim["wim_url"], service_uuid - ) - response = requests.get(servicepoint, auth=self.auth) - self.logger.warning('response.status_code={:s}'.format(str(response.status_code))) - if response.status_code != requests.codes.ok: - raise SdnConnectorError( - "Unable to obtain connectivity servcice status", - http_code=response.status_code, - ) - - service_status = {"sdn_status": "ACTIVE"} - - return service_status - except requests.exceptions.ConnectionError: - raise SdnConnectorError("Request Timeout", http_code=408) - - def search_mapp(self, connection_point): - id = connection_point["service_endpoint_id"] - if id not in self.mappings: - raise SdnConnectorError("Endpoint {} not located".format(str(id))) - else: - return self.mappings[id] - - def create_connectivity_service(self, service_type, connection_points, **kwargs): - """Stablish WAN connectivity between the endpoints - - Arguments: - service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), - ``L3``. - connection_points (list): each point corresponds to - an entry point from the DC to the transport network. One - connection point serves to identify the specific access and - some other service parameters, such as encapsulation type. - Represented by a dict as follows:: - - { - "service_endpoint_id": ..., (str[uuid]) - "service_endpoint_encapsulation_type": ..., - (enum: none, dot1q, ...) - "service_endpoint_encapsulation_info": { - ... 
(dict) - "vlan": ..., (int, present if encapsulation is dot1q) - "vni": ... (int, present if encapsulation is vxlan), - "peers": [(ipv4_1), (ipv4_2)] - (present if encapsulation is vxlan) - } - } - - The service endpoint ID should be previously informed to the WIM - engine in the RO when the WIM port mapping is registered. - - Keyword Arguments: - bandwidth (int): value in kilobytes - latency (int): value in milliseconds - - Other QoS might be passed as keyword arguments. - - Returns: - tuple: ``(service_id, conn_info)`` containing: - - *service_uuid* (str): UUID of the established connectivity - service - - *conn_info* (dict or None): Information to be stored at the - database (or ``None``). This information will be provided to - the :meth:`~.edit_connectivity_service` and :obj:`~.delete`. - **MUST** be JSON/YAML-serializable (plain data structures). - - Raises: - SdnConnectorException: In case of error. - """ - SETTINGS = { # min_endpoints, max_endpoints, vpn_service_type - 'ELINE': (2, 2, 'vpws'), # Virtual Private Wire Service - 'ELAN' : (2, None, 'vpls'), # Virtual Private LAN Service - } - settings = SETTINGS.get(service_type) - if settings is None: raise NotImplementedError('Unsupported service_type({:s})'.format(str(service_type))) - min_endpoints, max_endpoints, vpn_service_type = settings - - if max_endpoints is not None and len(connection_points) > max_endpoints: - msg = "Connections between more than {:d} endpoints are not supported for service_type {:s}" - raise SdnConnectorError(msg.format(max_endpoints, service_type)) - - if min_endpoints is not None and len(connection_points) < min_endpoints: - msg = "Connections must be of at least {:d} endpoints for service_type {:s}" - raise SdnConnectorError(msg.format(min_endpoints, service_type)) - - """First step, create the vpn service""" - uuid_l2vpn = str(uuid.uuid4()) - vpn_service = {} - vpn_service["vpn-id"] = uuid_l2vpn - vpn_service["vpn-svc-type"] = vpn_service_type - vpn_service["svc-topo"] = "any-to-any" - vpn_service["customer-name"] = "osm" - vpn_service_list = [] - vpn_service_list.append(vpn_service) - vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list} - response_service_creation = None - conn_info = [] - self.logger.info("Sending vpn-service : {:s}".format(str(vpn_service_l))) - - try: - endpoint_service_creation = ( - "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( - self.wim["wim_url"] - ) - ) - response_service_creation = requests.post( - endpoint_service_creation, - headers=self.headers, - json=vpn_service_l, - auth=self.auth, - ) - except requests.exceptions.ConnectionError: - raise SdnConnectorError( - "Request to create service Timeout", http_code=408 - ) - - if response_service_creation.status_code == 409: - raise SdnConnectorError( - "Service already exists", - http_code=response_service_creation.status_code, - ) - elif response_service_creation.status_code != requests.codes.created: - raise SdnConnectorError( - "Request to create service not accepted", - http_code=response_service_creation.status_code, - ) - - self.logger.info('connection_points = {:s}'.format(str(connection_points))) - - # Check if protected paths are requested - extended_connection_points = [] - for connection_point in connection_points: - extended_connection_points.append(connection_point) - - connection_point_wan_info = self.search_mapp(connection_point) - service_mapping_info = connection_point_wan_info.get('service_mapping_info', {}) - redundant_service_endpoint_ids = service_mapping_info.get('redundant') 
- - if redundant_service_endpoint_ids is None: continue - if len(redundant_service_endpoint_ids) == 0: continue - - for redundant_service_endpoint_id in redundant_service_endpoint_ids: - redundant_connection_point = copy.deepcopy(connection_point) - redundant_connection_point['service_endpoint_id'] = redundant_service_endpoint_id - extended_connection_points.append(redundant_connection_point) - - self.logger.info('extended_connection_points = {:s}'.format(str(extended_connection_points))) - - """Second step, create the connections and vpn attachments""" - for connection_point in extended_connection_points: - connection_point_wan_info = self.search_mapp(connection_point) - site_network_access = {} - connection = {} - - if connection_point["service_endpoint_encapsulation_type"] != "none": - if ( - connection_point["service_endpoint_encapsulation_type"] - == "dot1q" - ): - """The connection is a VLAN""" - connection["encapsulation-type"] = "dot1q-vlan-tagged" - tagged = {} - tagged_interf = {} - service_endpoint_encapsulation_info = connection_point[ - "service_endpoint_encapsulation_info" - ] - - if service_endpoint_encapsulation_info["vlan"] is None: - raise SdnConnectorError("VLAN must be provided") - - tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[ - "vlan" - ] - tagged["dot1q-vlan-tagged"] = tagged_interf - connection["tagged-interface"] = tagged - else: - raise NotImplementedError("Encapsulation type not implemented") - - site_network_access["connection"] = connection - self.logger.info("Sending connection:{}".format(connection)) - vpn_attach = {} - vpn_attach["vpn-id"] = uuid_l2vpn - vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role" - site_network_access["vpn-attachment"] = vpn_attach - self.logger.info("Sending vpn-attachement :{}".format(vpn_attach)) - uuid_sna = str(uuid.uuid4()) - site_network_access["network-access-id"] = uuid_sna - site_network_access["bearer"] = connection_point_wan_info[ - "service_mapping_info" - ]["bearer"] - - access_priority = connection_point_wan_info["service_mapping_info"].get("priority") - if access_priority is not None: - availability = {} - availability["access-priority"] = access_priority - availability["single-active"] = [None] - site_network_access["availability"] = availability - - constraint = {} - constraint['constraint-type'] = 'end-to-end-diverse' - constraint['target'] = {'all-other-accesses': [None]} - - access_diversity = {} - access_diversity['constraints'] = {'constraint': []} - access_diversity['constraints']['constraint'].append(constraint) - site_network_access["access-diversity"] = access_diversity - - site_network_accesses = {} - site_network_access_list = [] - site_network_access_list.append(site_network_access) - site_network_accesses[ - "ietf-l2vpn-svc:site-network-access" - ] = site_network_access_list - conn_info_d = {} - conn_info_d["site"] = connection_point_wan_info["service_mapping_info"][ - "site-id" - ] - conn_info_d["site-network-access-id"] = site_network_access[ - "network-access-id" - ] - conn_info_d["mapping"] = None - conn_info.append(conn_info_d) - - self.logger.info("Sending site_network_accesses : {:s}".format(str(site_network_accesses))) - - try: - endpoint_site_network_access_creation = ( - "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/" - "sites/site={}/site-network-accesses/".format( - self.wim["wim_url"], - connection_point_wan_info["service_mapping_info"][ - "site-id" - ], - ) - ) - response_endpoint_site_network_access_creation = requests.post( - 
endpoint_site_network_access_creation, - headers=self.headers, - json=site_network_accesses, - auth=self.auth, - ) - - if ( - response_endpoint_site_network_access_creation.status_code - == 409 - ): - self.delete_connectivity_service(vpn_service["vpn-id"]) - - raise SdnConnectorError( - "Site_Network_Access with ID '{}' already exists".format( - site_network_access["network-access-id"] - ), - http_code=response_endpoint_site_network_access_creation.status_code, - ) - elif ( - response_endpoint_site_network_access_creation.status_code - == 400 - ): - self.delete_connectivity_service(vpn_service["vpn-id"]) - - raise SdnConnectorError( - "Site {} does not exist".format( - connection_point_wan_info["service_mapping_info"][ - "site-id" - ] - ), - http_code=response_endpoint_site_network_access_creation.status_code, - ) - elif ( - response_endpoint_site_network_access_creation.status_code - != requests.codes.created - and response_endpoint_site_network_access_creation.status_code - != requests.codes.no_content - ): - self.delete_connectivity_service(vpn_service["vpn-id"]) - - raise SdnConnectorError( - "Request not accepted", - http_code=response_endpoint_site_network_access_creation.status_code, - ) - except requests.exceptions.ConnectionError: - self.delete_connectivity_service(vpn_service["vpn-id"]) - - raise SdnConnectorError("Request Timeout", http_code=408) - - return uuid_l2vpn, conn_info - - def delete_connectivity_service(self, service_uuid, conn_info=None): - """Disconnect multi-site endpoints previously connected - - This method should receive as the first argument the UUID generated by - the ``create_connectivity_service`` - """ - try: - self.logger.info("Sending delete") - servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format( - self.wim["wim_url"], service_uuid - ) - response = requests.delete(servicepoint, auth=self.auth) - - if response.status_code != requests.codes.no_content: - raise SdnConnectorError( - "Error in the request", http_code=response.status_code - ) - except requests.exceptions.ConnectionError: - raise SdnConnectorError("Request Timeout", http_code=408) - - def edit_connectivity_service( - self, service_uuid, conn_info=None, connection_points=None, **kwargs - ): - """Change an existing connectivity service, see - ``create_connectivity_service``""" - # sites = {"sites": {}} - # site_list = [] - vpn_service = {} - vpn_service["svc-topo"] = "any-to-any" - counter = 0 - - for connection_point in connection_points: - site_network_access = {} - connection_point_wan_info = self.search_mapp(connection_point) - params_site = {} - params_site["site-id"] = connection_point_wan_info["service_mapping_info"][ - "site-id" - ] - params_site["site-vpn-flavor"] = "site-vpn-flavor-single" - device_site = {} - device_site["device-id"] = connection_point_wan_info["device-id"] - params_site["devices"] = device_site - # network_access = {} - connection = {} - - if connection_point["service_endpoint_encapsulation_type"] != "none": - if connection_point["service_endpoint_encapsulation_type"] == "dot1q": - """The connection is a VLAN""" - connection["encapsulation-type"] = "dot1q-vlan-tagged" - tagged = {} - tagged_interf = {} - service_endpoint_encapsulation_info = connection_point[ - "service_endpoint_encapsulation_info" - ] - - if service_endpoint_encapsulation_info["vlan"] is None: - raise SdnConnectorError("VLAN must be provided") - - tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[ - "vlan" - ] - tagged["dot1q-vlan-tagged"] = 
tagged_interf - connection["tagged-interface"] = tagged - else: - raise NotImplementedError("Encapsulation type not implemented") - - site_network_access["connection"] = connection - vpn_attach = {} - vpn_attach["vpn-id"] = service_uuid - vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role" - site_network_access["vpn-attachment"] = vpn_attach - uuid_sna = conn_info[counter]["site-network-access-id"] - site_network_access["network-access-id"] = uuid_sna - site_network_access["bearer"] = connection_point_wan_info[ - "service_mapping_info" - ]["bearer"] - site_network_accesses = {} - site_network_access_list = [] - site_network_access_list.append(site_network_access) - site_network_accesses[ - "ietf-l2vpn-svc:site-network-access" - ] = site_network_access_list - - try: - endpoint_site_network_access_edit = ( - "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/" - "sites/site={}/site-network-accesses/".format( - self.wim["wim_url"], - connection_point_wan_info["service_mapping_info"]["site-id"], - ) - ) - response_endpoint_site_network_access_creation = requests.put( - endpoint_site_network_access_edit, - headers=self.headers, - json=site_network_accesses, - auth=self.auth, - ) - - if response_endpoint_site_network_access_creation.status_code == 400: - raise SdnConnectorError( - "Service does not exist", - http_code=response_endpoint_site_network_access_creation.status_code, - ) - elif ( - response_endpoint_site_network_access_creation.status_code != 201 - and response_endpoint_site_network_access_creation.status_code - != 204 - ): - raise SdnConnectorError( - "Request no accepted", - http_code=response_endpoint_site_network_access_creation.status_code, - ) - except requests.exceptions.ConnectionError: - raise SdnConnectorError("Request Timeout", http_code=408) - - counter += 1 - - return None - - def clear_all_connectivity_services(self): - """Delete all WAN Links corresponding to a WIM""" - try: - self.logger.info("Sending clear all connectivity services") - servicepoint = ( - "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( - self.wim["wim_url"] - ) - ) - response = requests.delete(servicepoint, auth=self.auth) - - if response.status_code != requests.codes.no_content: - raise SdnConnectorError( - "Unable to clear all connectivity services", - http_code=response.status_code, - ) - except requests.exceptions.ConnectionError: - raise SdnConnectorError("Request Timeout", http_code=408) - - def get_all_active_connectivity_services(self): - """Provide information about all active connections provisioned by a - WIM - """ - try: - self.logger.info("Sending get all connectivity services") - servicepoint = ( - "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( - self.wim["wim_url"] - ) - ) - response = requests.get(servicepoint, auth=self.auth) - - if response.status_code != requests.codes.ok: - raise SdnConnectorError( - "Unable to get all connectivity services", - http_code=response.status_code, - ) - - return response - except requests.exceptions.ConnectionError: - raise SdnConnectorError("Request Timeout", http_code=408) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/acknowledgements.txt b/src/tests/l2_vpn_gnmi_oc/tests/acknowledgements.txt deleted file mode 100644 index b7ce926dd..000000000 --- a/src/tests/l2_vpn_gnmi_oc/tests/acknowledgements.txt +++ /dev/null @@ -1,3 +0,0 @@ -MockOSM is based on source code taken from: -https://osm.etsi.org/gitlab/osm/ro/-/blob/master/RO-plugin/osm_ro_plugin/sdnconn.py 
-https://osm.etsi.org/gitlab/osm/ro/-/blob/master/RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn/wimconn_ietfl2vpn.py diff --git a/src/tests/l2_vpn_gnmi_oc/tests/sdnconn.py b/src/tests/l2_vpn_gnmi_oc/tests/sdnconn.py deleted file mode 100644 index a1849c9ef..000000000 --- a/src/tests/l2_vpn_gnmi_oc/tests/sdnconn.py +++ /dev/null @@ -1,242 +0,0 @@ -# -*- coding: utf-8 -*- -## -# Copyright 2018 University of Bristol - High Performance Networks Research -# Group -# All Rights Reserved. -# -# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique -# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# For those usages not covered by the Apache License, Version 2.0 please -# contact with: -# -# Neither the name of the University of Bristol nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# This work has been performed in the context of DCMS UK 5G Testbeds -# & Trials Programme and in the framework of the Metro-Haul project - -# funded by the European Commission under Grant number 761727 through the -# Horizon 2020 and 5G-PPP programmes. -## -"""The SDN connector is responsible for establishing both wide area network connectivity (WIM) -and intranet SDN connectivity. - -It receives information from ports to be connected . -""" - -import logging -from http import HTTPStatus - - -class SdnConnectorError(Exception): - """Base Exception for all connector related errors - provide the parameter 'http_code' (int) with the error code: - Bad_Request = 400 - Unauthorized = 401 (e.g. credentials are not valid) - Not_Found = 404 (e.g. try to edit or delete a non existing connectivity service) - Forbidden = 403 - Method_Not_Allowed = 405 - Not_Acceptable = 406 - Request_Timeout = 408 (e.g timeout reaching server, or cannot reach the server) - Conflict = 409 - Service_Unavailable = 503 - Internal_Server_Error = 500 - """ - - def __init__(self, message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value): - Exception.__init__(self, message) - self.http_code = http_code - - -class SdnConnectorBase(object): - """Abstract base class for all the SDN connectors - - Arguments: - wim (dict): WIM record, as stored in the database - wim_account (dict): WIM account record, as stored in the database - config - The arguments of the constructor are converted to object attributes. - An extra property, ``service_endpoint_mapping`` is created from ``config``. - """ - - def __init__(self, wim, wim_account, config=None, logger=None): - """ - :param wim: (dict). Contains among others 'wim_url' - :param wim_account: (dict). Contains among others 'uuid' (internal id), 'name', - 'sdn' (True if is intended for SDN-assist or False if intended for WIM), 'user', 'password'. - :param config: (dict or None): Particular information of plugin. 
These keys if present have a common meaning: - 'mapping_not_needed': (bool) False by default or if missing, indicates that mapping is not needed. - 'service_endpoint_mapping': (list) provides the internal endpoint mapping. The meaning is: - KEY meaning for WIM meaning for SDN assist - -------- -------- -------- - device_id pop_switch_dpid compute_id - device_interface_id pop_switch_port compute_pci_address - service_endpoint_id wan_service_endpoint_id SDN_service_endpoint_id - service_mapping_info wan_service_mapping_info SDN_service_mapping_info - contains extra information if needed. Text in Yaml format - switch_dpid wan_switch_dpid SDN_switch_dpid - switch_port wan_switch_port SDN_switch_port - datacenter_id vim_account vim_account - id: (internal, do not use) - wim_id: (internal, do not use) - :param logger (logging.Logger): optional logger object. If none is passed 'openmano.sdn.sdnconn' is used. - """ - self.logger = logger or logging.getLogger("ro.sdn") - self.wim = wim - self.wim_account = wim_account - self.config = config or {} - self.service_endpoint_mapping = self.config.get("service_endpoint_mapping", []) - - def check_credentials(self): - """Check if the connector itself can access the SDN/WIM with the provided url (wim.wim_url), - user (wim_account.user), and password (wim_account.password) - - Raises: - SdnConnectorError: Issues regarding authorization, access to - external URLs, etc are detected. - """ - raise NotImplementedError - - def get_connectivity_service_status(self, service_uuid, conn_info=None): - """Monitor the status of the connectivity service established - - Arguments: - service_uuid (str): UUID of the connectivity service - conn_info (dict or None): Information returned by the connector - during the service creation/edition and subsequently stored in - the database. - - Returns: - dict: JSON/YAML-serializable dict that contains a mandatory key - ``sdn_status`` associated with one of the following values:: - - {'sdn_status': 'ACTIVE'} - # The service is up and running. - - {'sdn_status': 'INACTIVE'} - # The service was created, but the connector - # cannot determine yet if connectivity exists - # (ideally, the caller needs to wait and check again). - - {'sdn_status': 'DOWN'} - # Connection was previously established, - # but an error/failure was detected. - - {'sdn_status': 'ERROR'} - # An error occurred when trying to create the service/ - # establish the connectivity. - - {'sdn_status': 'BUILD'} - # Still trying to create the service, the caller - # needs to wait and check again. - - Additionally ``error_msg``(**str**) and ``sdn_info``(**dict**) - keys can be used to provide additional status explanation or - new information available for the connectivity service. - """ - raise NotImplementedError - - def create_connectivity_service(self, service_type, connection_points, **kwargs): - """ - Establish SDN/WAN connectivity between the endpoints - :param service_type: (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), ``L3``. - :param connection_points: (list): each point corresponds to - an entry point to be connected. For WIM: from the DC to the transport network. - For SDN: Compute/PCI to the transport network. One - connection point serves to identify the specific access and - some other service parameters, such as encapsulation type. 
- Each item of the list is a dict with: - "service_endpoint_id": (str)(uuid) Same meaning that for 'service_endpoint_mapping' (see __init__) - In case the config attribute mapping_not_needed is True, this value is not relevant. In this case - it will contain the string "device_id:device_interface_id" - "service_endpoint_encapsulation_type": None, "dot1q", ... - "service_endpoint_encapsulation_info": (dict) with: - "vlan": ..., (int, present if encapsulation is dot1q) - "vni": ... (int, present if encapsulation is vxlan), - "peers": [(ipv4_1), (ipv4_2)] (present if encapsulation is vxlan) - "mac": ... - "device_id": ..., same meaning that for 'service_endpoint_mapping' (see __init__) - "device_interface_id": same meaning that for 'service_endpoint_mapping' (see __init__) - "switch_dpid": ..., present if mapping has been found for this device_id,device_interface_id - "swith_port": ... present if mapping has been found for this device_id,device_interface_id - "service_mapping_info": present if mapping has been found for this device_id,device_interface_id - :param kwargs: For future versions: - bandwidth (int): value in kilobytes - latency (int): value in milliseconds - Other QoS might be passed as keyword arguments. - :return: tuple: ``(service_id, conn_info)`` containing: - - *service_uuid* (str): UUID of the established connectivity service - - *conn_info* (dict or None): Information to be stored at the database (or ``None``). - This information will be provided to the :meth:`~.edit_connectivity_service` and :obj:`~.delete`. - **MUST** be JSON/YAML-serializable (plain data structures). - :raises: SdnConnectorException: In case of error. Nothing should be created in this case. - Provide the parameter http_code - """ - raise NotImplementedError - - def delete_connectivity_service(self, service_uuid, conn_info=None): - """ - Disconnect multi-site endpoints previously connected - - :param service_uuid: The one returned by create_connectivity_service - :param conn_info: The one returned by last call to 'create_connectivity_service' or 'edit_connectivity_service' - if they do not return None - :return: None - :raises: SdnConnectorException: In case of error. The parameter http_code must be filled - """ - raise NotImplementedError - - def edit_connectivity_service( - self, service_uuid, conn_info=None, connection_points=None, **kwargs - ): - """Change an existing connectivity service. - - This method's arguments and return value follow the same convention as - :meth:`~.create_connectivity_service`. - - :param service_uuid: UUID of the connectivity service. - :param conn_info: (dict or None): Information previously returned by last call to create_connectivity_service - or edit_connectivity_service - :param connection_points: (list): If provided, the old list of connection points will be replaced. - :param kwargs: Same meaning that create_connectivity_service - :return: dict or None: Information to be updated and stored at the database. - When ``None`` is returned, no information should be changed. - When an empty dict is returned, the database record will be deleted. - **MUST** be JSON/YAML-serializable (plain data structures). - Raises: - SdnConnectorException: In case of error. - """ - - def clear_all_connectivity_services(self): - """Delete all WAN Links in a WIM. - - This method is intended for debugging only, and should delete all the - connections controlled by the WIM/SDN, not only the connections that - a specific RO is aware of. - - Raises: - SdnConnectorException: In case of error. 
- """ - raise NotImplementedError - - def get_all_active_connectivity_services(self): - """Provide information about all active connections provisioned by a - WIM. - - Raises: - SdnConnectorException: In case of error. - """ - raise NotImplementedError diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py index 76b980c87..480aee615 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py @@ -12,50 +12,43 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging +import json, logging, os from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient -from .Fixtures import ( # pylint: disable=unused-import - # be careful, order of symbols is important here! - osm_wim, context_client -) -from .MockOSM import MockOSM -from .OSM_Constants import SERVICE_CONNECTION_POINTS, SERVICE_TYPE +from .Fixtures import context_client # pylint: disable=unused-import +from .Tools import do_rest_get_request, do_rest_post_request -logging.getLogger('ro.sdn.ietfl2vpn').setLevel(logging.DEBUG) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) +REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l2vpn-service.json') ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) -EXPECTED_SERVICE_TYPES = (ServiceTypeEnum.SERVICETYPE_L2NM, ServiceTypeEnum.SERVICETYPE_L3NM) + # pylint: disable=redefined-outer-name, unused-argument def test_service_ietf_creation( - osm_wim : MockOSM, context_client : ContextClient + context_client : ContextClient, ): - osm_wim.create_connectivity_service(SERVICE_TYPE, SERVICE_CONNECTION_POINTS) - service_uuid = list(osm_wim.conn_info.keys())[0] # this test adds a single service + # Issue service creation request + with open(REQUEST_FILE, 'r', encoding='UTF-8') as f: + svc1_data = json.load(f) + URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services' + do_rest_post_request(URL, body=svc1_data, logger=LOGGER, expected_status_codes={201}) + vpn_id = svc1_data['ietf-l2vpn-svc:l2vpn-svc']['vpn-services']['vpn-service'][0]['vpn-id'] - result = osm_wim.get_connectivity_service_status(service_uuid) - assert 'sdn_status' in result - assert result['sdn_status'] == 'ACTIVE' + URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={:s}/'.format(vpn_id) + service_data = do_rest_get_request(URL, logger=LOGGER, expected_status_codes={200}) + service_uuid = service_data['service-id'] - # Verify the scenario has 1 service and 0 slices + # Verify service was created response = context_client.GetContext(ADMIN_CONTEXT_ID) assert len(response.service_ids) == 1 assert len(response.slice_ids) == 0 - # Check there are no slices - response = context_client.ListSlices(ADMIN_CONTEXT_ID) - LOGGER.warning('Slices[{:d}] = {:s}'.format( - len(response.slices), grpc_message_to_json_string(response) - )) - assert len(response.slices) == 0 - # Check there is 1 service response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.warning('Services[{:d}] = {:s}'.format( @@ -67,7 +60,7 @@ def test_service_ietf_creation( service_id = service.service_id assert 
service_id.service_uuid.uuid == service_uuid assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE - assert service.service_type in EXPECTED_SERVICE_TYPES + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM response = context_client.ListConnections(service_id) LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( @@ -75,5 +68,3 @@ def test_service_ietf_creation( grpc_message_to_json_string(response) )) assert len(response.connections) == 1 - - raise Exception() diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py index 43b4c48cf..58aa2321d 100644 --- a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py @@ -12,31 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging +import logging, os from typing import Set from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient -from .Fixtures import ( # pylint: disable=unused-import - # be careful, order of symbols is important here! - osm_wim, context_client -) -from .MockOSM import MockOSM +from .Fixtures import context_client # pylint: disable=unused-import +from .Tools import do_rest_delete_request -logging.getLogger('ro.sdn.ietfl2vpn').setLevel(logging.DEBUG) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) +REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l2vpn-service.json') ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) -EXPECTED_SERVICE_TYPES = (ServiceTypeEnum.SERVICETYPE_L2NM, ServiceTypeEnum.SERVICETYPE_L3NM) # pylint: disable=redefined-outer-name, unused-argument def test_service_ietf_removal( - osm_wim : MockOSM, context_client : ContextClient + context_client : ContextClient, # pylint: disable=redefined-outer-name ): # Verify the scenario has 1 service and 0 slices response = context_client.GetContext(ADMIN_CONTEXT_ID) @@ -61,7 +57,7 @@ def test_service_ietf_removal( for service in response.services: service_id = service.service_id assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE - assert service.service_type == EXPECTED_SERVICE_TYPES + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM response = context_client.ListConnections(service_id) LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( @@ -76,8 +72,8 @@ def test_service_ietf_removal( assert len(service_uuids) == 1 service_uuid = service_uuids.pop() - osm_wim.conn_info[service_uuid] = dict() # delete just needs the placeholder to be populated - osm_wim.delete_connectivity_service(service_uuid) + URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={:s}/'.format(service_uuid) + do_rest_delete_request(URL, logger=LOGGER, expected_status_codes={204}) # Verify the scenario has no services/slices response = context_client.GetContext(ADMIN_CONTEXT_ID) -- GitLab From 8f09b0e5d0cc27f7f696a217d13fbd4245551799 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 17:50:54 +0000 Subject: [PATCH 68/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed L2VPN descriptor --- 
src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json index 14185a8c1..25dda200e 100644 --- a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json +++ b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json @@ -19,7 +19,7 @@ "site-network-access": [ { "network-access-id": "eth1", - "type": "ietf-l3vpn-svc:multipoint", + "type": "ietf-l2vpn-svc:multipoint", "device-reference": "dc1", "vpn-attachment": { "vpn-id": "ietf-l2vpn-svc", @@ -46,7 +46,7 @@ "site-network-access": [ { "network-access-id": "eth1", - "type": "ietf-l3vpn-svc:multipoint", + "type": "ietf-l2vpn-svc:multipoint", "device-reference": "dc2", "vpn-attachment": { "vpn-id": "ietf-l2vpn-svc", -- GitLab From 76a5da98056ce55c2362a8774534c857704f187a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 17:51:57 +0000 Subject: [PATCH 69/79] End-to-end test - EUCNC24: - Code Cleanup --- src/tests/eucnc24/tests/Fixtures.py | 13 +++---------- src/tests/eucnc24/tests/test_service_ietf_create.py | 1 - src/tests/eucnc24/tests/test_service_ietf_remove.py | 12 ++++++++---- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/src/tests/eucnc24/tests/Fixtures.py b/src/tests/eucnc24/tests/Fixtures.py index 5997e58c8..aa37459a1 100644 --- a/src/tests/eucnc24/tests/Fixtures.py +++ b/src/tests/eucnc24/tests/Fixtures.py @@ -15,29 +15,22 @@ import pytest from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient -from monitoring.client.MonitoringClient import MonitoringClient from service.client.ServiceClient import ServiceClient @pytest.fixture(scope='session') -def context_client(): +def context_client() -> ContextClient: _client = ContextClient() yield _client _client.close() @pytest.fixture(scope='session') -def device_client(): +def device_client() -> DeviceClient: _client = DeviceClient() yield _client _client.close() @pytest.fixture(scope='session') -def monitoring_client(): - _client = MonitoringClient() - yield _client - _client.close() - -@pytest.fixture(scope='session') -def service_client(): +def service_client() -> ServiceClient: _client = ServiceClient() yield _client _client.close() diff --git a/src/tests/eucnc24/tests/test_service_ietf_create.py b/src/tests/eucnc24/tests/test_service_ietf_create.py index f3a68801d..c1d761f40 100644 --- a/src/tests/eucnc24/tests/test_service_ietf_create.py +++ b/src/tests/eucnc24/tests/test_service_ietf_create.py @@ -13,7 +13,6 @@ # limitations under the License. import json, logging, os -from typing import Dict from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum from common.tools.grpc.Tools import grpc_message_to_json_string diff --git a/src/tests/eucnc24/tests/test_service_ietf_remove.py b/src/tests/eucnc24/tests/test_service_ietf_remove.py index 2c3920824..d0dad7a2d 100644 --- a/src/tests/eucnc24/tests/test_service_ietf_remove.py +++ b/src/tests/eucnc24/tests/test_service_ietf_remove.py @@ -13,7 +13,7 @@ # limitations under the License. 
import logging, os -from typing import Dict, Set, Tuple +from typing import Set from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum from common.tools.grpc.Tools import grpc_message_to_json_string @@ -41,12 +41,16 @@ def test_service_ietf_removal( # Check there are no slices response = context_client.ListSlices(ADMIN_CONTEXT_ID) - LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + LOGGER.warning('Slices[{:d}] = {:s}'.format( + len(response.slices), grpc_message_to_json_string(response) + )) assert len(response.slices) == 0 # Check there is 1 service response = context_client.ListServices(ADMIN_CONTEXT_ID) - LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + LOGGER.warning('Services[{:d}] = {:s}'.format( + len(response.services), grpc_message_to_json_string(response) + )) assert len(response.services) == 1 service_uuids : Set[str] = set() @@ -66,7 +70,7 @@ def test_service_ietf_removal( # Identify service to delete assert len(service_uuids) == 1 - service_uuid = set(service_uuids).pop() + service_uuid = service_uuids.pop() URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service={:s}/'.format(service_uuid) do_rest_delete_request(URL, logger=LOGGER, expected_status_codes={204}) -- GitLab From 2e1beccfd6d26c49f9ad8a33784c610e5b785bbc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 18:04:27 +0000 Subject: [PATCH 70/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed L2VPN descriptor --- src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json index 25dda200e..4c5033bea 100644 --- a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json +++ b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json @@ -5,7 +5,12 @@ "vpn-id": "ietf-l2vpn-svc", "vpn-svc-type": "vpws", "svc-topo": "any-to-any", - "customer-name": "somebody" + "customer-name": "somebody", + "ce-vlan-preservation": true, + "ce-vlan-cos-preservation": true, + "frame-delivery": { + "multicast-gp-port-mapping": "ietf-l2vpn-svc:static-mapping" + } } ]}, "sites": { @@ -15,6 +20,7 @@ "management": {"type": "ietf-l2vpn-svc:provider-managed"}, "locations": {"location": [{"location-id": "DC1"}]}, "devices": {"device": [{"device-id": "dc1", "location": "DC1"}]}, + "default-ce-vlan-id": 1, "site-network-accesses": { "site-network-access": [ { @@ -42,6 +48,7 @@ "management": {"type": "ietf-l2vpn-svc:provider-managed"}, "locations": {"location": [{"location-id": "DC2"}]}, "devices": {"device": [{"device-id": "dc2", "location": "DC2"}]}, + "default-ce-vlan-id": 1, "site-network-accesses": { "site-network-access": [ { -- GitLab From ce1ad0722a41d2e531d0839331609a2021a9dd8b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 18:21:56 +0000 Subject: [PATCH 71/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed L2VPN descriptor --- src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json index 4c5033bea..c7a3d65e0 100644 --- a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json +++ b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json @@ -32,6 +32,8 @@ "site-role": 
"ietf-l2vpn-svc:any-to-any-role" }, "bearer": {"bearer-reference": "r1:Ethernet10"}, + "oam": {"md-name": "fake-md-name", "md-level": 0}, + "service": {"svc-mtu": 1400}, "connection": { "encapsulation-type": "vlan", "tagged-interface": { @@ -60,6 +62,8 @@ "site-role": "ietf-l2vpn-svc:any-to-any-role" }, "bearer": {"bearer-reference": "r3:Ethernet10"}, + "oam": {"md-name": "fake-md-name", "md-level": 0}, + "service": {"svc-mtu": 1400}, "connection": { "encapsulation-type": "vlan", "tagged-interface": { -- GitLab From e15b54cd9d4ead8983ed9b2787a80fb66e55290f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 18:31:55 +0000 Subject: [PATCH 72/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed L2VPN descriptor --- src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json index c7a3d65e0..95bea42ea 100644 --- a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json +++ b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json @@ -32,14 +32,14 @@ "site-role": "ietf-l2vpn-svc:any-to-any-role" }, "bearer": {"bearer-reference": "r1:Ethernet10"}, - "oam": {"md-name": "fake-md-name", "md-level": 0}, "service": {"svc-mtu": 1400}, "connection": { "encapsulation-type": "vlan", "tagged-interface": { "type": "ietf-l2vpn-svc:dot1q", "dot1q-vlan-tagged": {"cvlan-id": 125} - } + }, + "oam": {"md-name": "fake-md-name", "md-level": 0} } } ] @@ -62,14 +62,14 @@ "site-role": "ietf-l2vpn-svc:any-to-any-role" }, "bearer": {"bearer-reference": "r3:Ethernet10"}, - "oam": {"md-name": "fake-md-name", "md-level": 0}, "service": {"svc-mtu": 1400}, "connection": { "encapsulation-type": "vlan", "tagged-interface": { "type": "ietf-l2vpn-svc:dot1q", "dot1q-vlan-tagged": {"cvlan-id": 125} - } + }, + "oam": {"md-name": "fake-md-name", "md-level": 0} } } ] -- GitLab From 7aab9fd31daa0a3a4820e69170797cf64f3db091 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 18:43:29 +0000 Subject: [PATCH 73/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed L2VPN descriptor - Fixed README.md - Fixed CLab scenario --- src/tests/l2_vpn_gnmi_oc/README.md | 18 +++++++++++++----- .../clab/l2_vpn_gnmi_oc.clab.yml | 4 ++-- .../data/ietf-l2vpn-service.json | 4 ++-- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/README.md b/src/tests/l2_vpn_gnmi_oc/README.md index c05c16826..0bd02d5f9 100644 --- a/src/tests/l2_vpn_gnmi_oc/README.md +++ b/src/tests/l2_vpn_gnmi_oc/README.md @@ -48,20 +48,28 @@ sudo rm -rf clab-l2_vpn_gnmi_oc/ .l2_vpn_gnmi_oc.clab.yml.bak ```bash docker exec -it clab-l2_vpn_gnmi_oc-r1 bash docker exec -it clab-l2_vpn_gnmi_oc-r2 bash +docker exec -it clab-l2_vpn_gnmi_oc-r3 bash docker exec -it clab-l2_vpn_gnmi_oc-r1 Cli docker exec -it clab-l2_vpn_gnmi_oc-r2 Cli +docker exec -it clab-l2_vpn_gnmi_oc-r3 Cli ``` ## Configure ContainerLab clients ```bash docker exec -it clab-l2_vpn_gnmi_oc-dc1 bash - ip address add 172.16.1.10/24 dev eth1 - ip route add 172.16.2.0/24 via 172.16.1.1 - ping 172.16.2.10 + ip link set address 00:c1:ab:00:01:0a dev eth1 + ip link set eth1 up + ip link add link eth1 name eth1.125 type vlan id 125 + ip address add 172.16.1.10/24 dev eth1.125 + ip link set eth1.125 up + ping 172.16.1.20 docker exec -it clab-l2_vpn_gnmi_oc-dc2 bash - ip address add 172.16.2.10/24 dev eth1 - ip route add 172.16.1.0/24 via 172.16.2.1 + ip link set address 00:c1:ab:00:01:14 dev eth1 + ip 
link set eth1 up + ip link add link eth1 name eth1.125 type vlan id 125 + ip address add 172.16.1.20/24 dev eth1.125 + ip link set eth1.125 up ping 172.16.1.10 ``` diff --git a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml index 765ee3ef3..611dd90b3 100644 --- a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml +++ b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml @@ -60,7 +60,7 @@ topology: - ip link set address 00:c1:ab:00:01:0a dev eth1 - ip link set eth1 up - ip link add link eth1 name eth1.125 type vlan id 125 - - ip addr add 172.16.1.10/24 dev eth1.125 + - ip address add 172.16.1.10/24 dev eth1.125 - ip link set eth1.125 up dc2: @@ -70,7 +70,7 @@ topology: - ip link set address 00:c1:ab:00:01:14 dev eth1 - ip link set eth1 up - ip link add link eth1 name eth1.125 type vlan id 125 - - ip addr add 172.16.1.20/24 dev eth1.125 + - ip address add 172.16.1.20/24 dev eth1.125 - ip link set eth1.125 up links: diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json index 95bea42ea..b87fc9512 100644 --- a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json +++ b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json @@ -17,7 +17,7 @@ "site": [ { "site-id": "site_DC1", - "management": {"type": "ietf-l2vpn-svc:provider-managed"}, + "management": {"type": "ietf-l2vpn-svc:customer-managed"}, "locations": {"location": [{"location-id": "DC1"}]}, "devices": {"device": [{"device-id": "dc1", "location": "DC1"}]}, "default-ce-vlan-id": 1, @@ -47,7 +47,7 @@ }, { "site-id": "site_DC2", - "management": {"type": "ietf-l2vpn-svc:provider-managed"}, + "management": {"type": "ietf-l2vpn-svc:customer-managed"}, "locations": {"location": [{"location-id": "DC2"}]}, "devices": {"device": [{"device-id": "dc2", "location": "DC2"}]}, "default-ce-vlan-id": 1, -- GitLab From 6c6788a0173ef1ffc1f084f3368ccd2c28088385 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 19:21:21 +0000 Subject: [PATCH 74/79] End-to-end test - L2 VPN gNMI OpenConfig: - Fixed L2VPN descriptor --- src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json index b87fc9512..95bea42ea 100644 --- a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json +++ b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json @@ -17,7 +17,7 @@ "site": [ { "site-id": "site_DC1", - "management": {"type": "ietf-l2vpn-svc:customer-managed"}, + "management": {"type": "ietf-l2vpn-svc:provider-managed"}, "locations": {"location": [{"location-id": "DC1"}]}, "devices": {"device": [{"device-id": "dc1", "location": "DC1"}]}, "default-ce-vlan-id": 1, @@ -47,7 +47,7 @@ }, { "site-id": "site_DC2", - "management": {"type": "ietf-l2vpn-svc:customer-managed"}, + "management": {"type": "ietf-l2vpn-svc:provider-managed"}, "locations": {"location": [{"location-id": "DC2"}]}, "devices": {"device": [{"device-id": "dc2", "location": "DC2"}]}, "default-ce-vlan-id": 1, -- GitLab From 0f59626ba99b3253f440cabd2cffc947f4192b13 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 19:37:35 +0000 Subject: [PATCH 75/79] NBI component - IETF L2VPN connector: - Fixed checks of site management type --- src/nbi/service/ietf_l2vpn/Handlers.py | 10 ++++++---- .../service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) 
diff --git a/src/nbi/service/ietf_l2vpn/Handlers.py b/src/nbi/service/ietf_l2vpn/Handlers.py index 9fe7e2a0a..1b8fc2b93 100644 --- a/src/nbi/service/ietf_l2vpn/Handlers.py +++ b/src/nbi/service/ietf_l2vpn/Handlers.py @@ -264,9 +264,9 @@ def process_site_network_access( def process_site(site : Dict, errors : List[Dict]) -> None: site_id = site['site-id'] - # this change is made for ECOC2025 demo purposes - if site['management']['type'] != 'provider-managed': - # if site['management']['type'] == 'customer-managed': + site_management_type = site['management']['type'] + site_management_type = site_management_type.replace('ietf-l2vpn-svc:', '') + if site_management_type != 'provider-managed': MSG = 'Site Management Type: {:s}' raise NotImplementedError(MSG.format(str(site['management']['type']))) @@ -275,7 +275,9 @@ def process_site(site : Dict, errors : List[Dict]) -> None: process_site_network_access(site_id, network_access, errors) def update_vpn(site : Dict, errors : List[Dict]) -> None: - if site['management']['type'] != 'provider-managed': + site_management_type = site['management']['type'] + site_management_type = site_management_type.replace('ietf-l2vpn-svc:', '') + if site_management_type != 'provider-managed': MSG = 'Site Management Type: {:s}' raise NotImplementedError(MSG.format(str(site['management']['type']))) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index bd9706f31..76fec0479 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -151,7 +151,7 @@ class L2VPN_SiteNetworkAccesses(Resource): 'sites': {'site': [{ 'site-id': site_id, 'default-ce-vlan-id': 1, - 'management': {'type': 'customer-managed'}, + 'management': {'type': 'provider-managed'}, 'locations': {'location': [ {'location-id': location_ref} for location_ref in location_refs -- GitLab From 4d9c183e51aa8df7789dd5f8b208b72e052b9dea Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 19:55:09 +0000 Subject: [PATCH 76/79] CI/CD pipeline: - Reactivated tests --- .gitlab-ci.yml | 76 +++++++++++----------- src/tests/.gitlab-ci.yml | 34 +++++----- src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml | 84 ++++++++++++------------- 3 files changed, 97 insertions(+), 97 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6627e11cb..53763f5e1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -28,44 +28,44 @@ workflow: # include the individual .gitlab-ci.yml of each micro-service and tests include: -# #- local: '/manifests/.gitlab-ci.yml' -# - local: '/src/monitoring/.gitlab-ci.yml' -# - local: '/src/nbi/.gitlab-ci.yml' -# - local: '/src/context/.gitlab-ci.yml' -# - local: '/src/device/.gitlab-ci.yml' -# - local: '/src/service/.gitlab-ci.yml' -# - local: '/src/qkd_app/.gitlab-ci.yml' -# - local: '/src/dbscanserving/.gitlab-ci.yml' -# - local: '/src/opticalattackmitigator/.gitlab-ci.yml' -# - local: '/src/opticalattackdetector/.gitlab-ci.yml' -# - local: '/src/opticalattackmanager/.gitlab-ci.yml' -# - local: '/src/opticalcontroller/.gitlab-ci.yml' -# - local: '/src/ztp/.gitlab-ci.yml' -# - local: '/src/policy/.gitlab-ci.yml' -# - local: '/src/automation/.gitlab-ci.yml' -# - local: '/src/forecaster/.gitlab-ci.yml' -# #- local: '/src/webui/.gitlab-ci.yml' -# #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' -# #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' -# #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' -# - local: 
'/src/slice/.gitlab-ci.yml' -# #- local: '/src/interdomain/.gitlab-ci.yml' -# - local: '/src/pathcomp/.gitlab-ci.yml' -# #- local: '/src/dlt/.gitlab-ci.yml' -# - local: '/src/load_generator/.gitlab-ci.yml' -# - local: '/src/bgpls_speaker/.gitlab-ci.yml' -# - local: '/src/kpi_manager/.gitlab-ci.yml' -# - local: '/src/kpi_value_api/.gitlab-ci.yml' -# #- local: '/src/kpi_value_writer/.gitlab-ci.yml' -# #- local: '/src/telemetry/.gitlab-ci.yml' -# - local: '/src/analytics/.gitlab-ci.yml' -# - local: '/src/qos_profile/.gitlab-ci.yml' -# - local: '/src/vnt_manager/.gitlab-ci.yml' -# - local: '/src/e2e_orchestrator/.gitlab-ci.yml' -# - local: '/src/ztp_server/.gitlab-ci.yml' -# - local: '/src/osm_client/.gitlab-ci.yml' -# - local: '/src/simap_connector/.gitlab-ci.yml' -# - local: '/src/pluggables/.gitlab-ci.yml' + #- local: '/manifests/.gitlab-ci.yml' + - local: '/src/monitoring/.gitlab-ci.yml' + - local: '/src/nbi/.gitlab-ci.yml' + - local: '/src/context/.gitlab-ci.yml' + - local: '/src/device/.gitlab-ci.yml' + - local: '/src/service/.gitlab-ci.yml' + - local: '/src/qkd_app/.gitlab-ci.yml' + - local: '/src/dbscanserving/.gitlab-ci.yml' + - local: '/src/opticalattackmitigator/.gitlab-ci.yml' + - local: '/src/opticalattackdetector/.gitlab-ci.yml' + - local: '/src/opticalattackmanager/.gitlab-ci.yml' + - local: '/src/opticalcontroller/.gitlab-ci.yml' + - local: '/src/ztp/.gitlab-ci.yml' + - local: '/src/policy/.gitlab-ci.yml' + - local: '/src/automation/.gitlab-ci.yml' + - local: '/src/forecaster/.gitlab-ci.yml' + #- local: '/src/webui/.gitlab-ci.yml' + #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' + #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' + #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' + - local: '/src/slice/.gitlab-ci.yml' + #- local: '/src/interdomain/.gitlab-ci.yml' + - local: '/src/pathcomp/.gitlab-ci.yml' + #- local: '/src/dlt/.gitlab-ci.yml' + - local: '/src/load_generator/.gitlab-ci.yml' + - local: '/src/bgpls_speaker/.gitlab-ci.yml' + - local: '/src/kpi_manager/.gitlab-ci.yml' + - local: '/src/kpi_value_api/.gitlab-ci.yml' + #- local: '/src/kpi_value_writer/.gitlab-ci.yml' + #- local: '/src/telemetry/.gitlab-ci.yml' + - local: '/src/analytics/.gitlab-ci.yml' + - local: '/src/qos_profile/.gitlab-ci.yml' + - local: '/src/vnt_manager/.gitlab-ci.yml' + - local: '/src/e2e_orchestrator/.gitlab-ci.yml' + - local: '/src/ztp_server/.gitlab-ci.yml' + - local: '/src/osm_client/.gitlab-ci.yml' + - local: '/src/simap_connector/.gitlab-ci.yml' + - local: '/src/pluggables/.gitlab-ci.yml' # This should be last one: end-to-end integration tests - local: '/src/tests/.gitlab-ci.yml' diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 7472b8f43..267d7ac23 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -14,22 +14,22 @@ # include the individual .gitlab-ci.yml of each end-to-end integration test include: -# - local: '/src/tests/ofc22/.gitlab-ci.yml' -# #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' -# - local: '/src/tests/ecoc22/.gitlab-ci.yml' -# #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' -# #- local: '/src/tests/ofc23/.gitlab-ci.yml' -# - local: '/src/tests/ofc24/.gitlab-ci.yml' -# - local: '/src/tests/eucnc24/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25/.gitlab-ci.yml' -# - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' -# - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' -# - 
local: '/src/tests/acl_end2end/.gitlab-ci.yml' + - local: '/src/tests/ofc22/.gitlab-ci.yml' + #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' + - local: '/src/tests/ecoc22/.gitlab-ci.yml' + #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' + #- local: '/src/tests/ofc23/.gitlab-ci.yml' + - local: '/src/tests/ofc24/.gitlab-ci.yml' + - local: '/src/tests/eucnc24/.gitlab-ci.yml' + #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' + #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' + #- local: '/src/tests/ofc25/.gitlab-ci.yml' + - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' + - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' + - local: '/src/tests/acl_end2end/.gitlab-ci.yml' - local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml' -# - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' -# - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' -# - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' -# - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' + - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' + - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' + - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' + - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' diff --git a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml index 38a473f6c..310770df5 100644 --- a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml +++ b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml @@ -145,10 +145,10 @@ end2end_test l2_vpn_gnmi_oc: # Configure TeraFlowSDN deployment # Uncomment if DEBUG log level is needed for the components #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml - - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml - - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml - - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml - source src/tests/${TEST_NAME}/deploy_specs.sh #- export TFS_REGISTRY_IMAGES="${CI_REGISTRY_IMAGE}" @@ -214,45 +214,45 @@ end2end_test l2_vpn_gnmi_oc: - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss" -# # Run end-to-end test: configure service TFS -# - > -# docker run -t --rm --name ${TEST_NAME} --network=host -# --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" -# --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" -# $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh -# -# # Give time to routers for being configured and stabilized -# - sleep 60 -# -# # Dump configuration of the routers (after configure TFS service) -# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" -# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" -# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" -# -# # Run end-to-end test: test connectivity with ping -# - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" -# - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" -# - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" -# -# # Run end-to-end test: deconfigure service TFS -# - > -# docker run -t --rm --name ${TEST_NAME} --network=host -# --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" -# --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" -# $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh -# -# # Give time to routers for being configured and stabilized -# - sleep 60 -# -# # Dump configuration of the routers (after deconfigure TFS service) -# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" -# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" -# - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" -# -# # Run end-to-end test: test no connectivity with ping -# - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" -# - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" -# - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss" + # Run end-to-end test: configure service TFS + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh + + # Give time to routers for being configured and stabilized + - sleep 60 + + # Dump configuration of the routers (after configure TFS service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: test connectivity with ping + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" + + # Run end-to-end test: deconfigure service TFS + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh + + # Give time to routers for being configured and stabilized + - sleep 60 + + # Dump configuration of the routers (after deconfigure TFS service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: test no connectivity with ping + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss" # Run end-to-end test: configure service IETF - > -- GitLab From 59e78e8ceb786ce7e091096c33b001ea132b0bcb Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 22:34:17 +0000 Subject: [PATCH 77/79] NBI component - IETF L2VPN connector: - Fixed checks of site location --- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 76fec0479..e607d938c 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -88,12 +88,24 @@ class L2VPN_SiteNetworkAccesses(Resource): location_refs = set() location_refs.add('fake-location') + device_refs = dict() + device_refs['fake-device'] = 'fake-location' + # Add mandatory fields OSM RO driver skips and fix wrong ones for site_network_access in site_network_accesses: if 'location-reference' in site_network_access: - location_refs.add(site_network_access['location-reference']) + location = site_network_access['location-reference'] + else: + location = 'fake-location' + site_network_access['location-reference'] = location + location_refs.add(location) + + if 'device-reference' in site_network_access: + device = site_network_access['device-reference'] else: - site_network_access['location-reference'] = 'fake-location' + device = 'fake-device' + site_network_access['device-reference'] = device + device_refs[device] = location if 'connection' in site_network_access: connection = site_network_access['connection'] @@ -156,6 +168,10 @@ class L2VPN_SiteNetworkAccesses(Resource): {'location-id': location_ref} for location_ref in location_refs ]}, + 'devices': {'device': [ + {'device-id': device_ref, 'location': location_ref} + for device_ref, location_ref in device_refs.items() + ]}, 'site-network-accesses': { 'site-network-access': site_network_accesses } -- GitLab From a8de4509346ef9460c3e37bf3c67361944f2a37d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 22 Jan 2026 22:49:49 +0000 Subject: [PATCH 78/79] NBI component - IETF L2VPN connector: - Fixed checks of site location --- src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index e607d938c..2f027c0d5 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -164,10 +164,10 @@ class L2VPN_SiteNetworkAccesses(Resource): 'site-id': site_id, 'default-ce-vlan-id': 1, 'management': {'type': 'provider-managed'}, - 'locations': {'location': [ - {'location-id': location_ref} - for location_ref in location_refs - ]}, + #'locations': {'location': [ + # {'location-id': location_ref} + # for location_ref in location_refs + #]}, 'devices': {'device': [ {'device-id': device_ref, 'location': location_ref} for device_ref, location_ref in device_refs.items() -- GitLab From c94618530b41deea1cd3d29e673ab6e3deeee241 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jan 2026 07:40:55 +0000 Subject: [PATCH 79/79] NBI component - IETF L2VPN connector: - Fixed checks of site location --- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py 
b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 2f027c0d5..e11beb62c 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -93,11 +93,12 @@ class L2VPN_SiteNetworkAccesses(Resource): # Add mandatory fields OSM RO driver skips and fix wrong ones for site_network_access in site_network_accesses: + location = 'fake-location' if 'location-reference' in site_network_access: location = site_network_access['location-reference'] - else: - location = 'fake-location' - site_network_access['location-reference'] = location + site_network_access.pop('location-reference') + #else: + # site_network_access['location-reference'] = location location_refs.add(location) if 'device-reference' in site_network_access: @@ -164,10 +165,10 @@ class L2VPN_SiteNetworkAccesses(Resource): 'site-id': site_id, 'default-ce-vlan-id': 1, 'management': {'type': 'provider-managed'}, - #'locations': {'location': [ - # {'location-id': location_ref} - # for location_ref in location_refs - #]}, + 'locations': {'location': [ + {'location-id': location_ref} + for location_ref in location_refs + ]}, 'devices': {'device': [ {'device-id': device_ref, 'location': location_ref} for device_ref, location_ref in device_refs.items() @@ -179,7 +180,7 @@ class L2VPN_SiteNetworkAccesses(Resource): }} MSG = '[_prepare_request_payload] request_data={:s}' - LOGGER.debug(MSG.format(str(request_data))) + LOGGER.warning(MSG.format(str(request_data))) return request_data errors.append('Unexpected request: {:s}'.format(str(request_data))) -- GitLab
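Reviewer note: the sketch below summarizes the RESTCONF interaction that the updated test_service_ietf_create.py / test_service_ietf_remove.py cases perform against the TFS NBI, using the do_rest_post_request / do_rest_get_request / do_rest_delete_request helpers and URL paths that appear in the diffs above. It is a minimal illustration only: the wrapper functions, their placement next to tests/Tools.py, and the use of the helpers outside pytest are assumptions, not part of the patch series.

```python
# Minimal sketch (not part of the patches): create, verify and remove the IETF L2VPN
# service the same way the updated pytest cases do. Helper names, URL paths and
# expected status codes are taken from the diffs above; the wrapper functions are
# illustrative and assume this module sits alongside src/tests/l2_vpn_gnmi_oc/tests/.
import json, logging, os

from .Tools import do_rest_delete_request, do_rest_get_request, do_rest_post_request

LOGGER = logging.getLogger(__name__)

# Same descriptor file the tests load: data/ietf-l2vpn-service.json
REQUEST_FILE = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l2vpn-service.json'
)

def create_l2vpn_service() -> str:
    # POST the full IETF L2VPN descriptor; the NBI is expected to reply 201 Created
    with open(REQUEST_FILE, 'r', encoding='UTF-8') as f:
        svc_data = json.load(f)
    url = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services'
    do_rest_post_request(url, body=svc_data, logger=LOGGER, expected_status_codes={201})

    # GET the vpn-service back by its vpn-id to learn the TFS service UUID ('service-id')
    vpn_id = svc_data['ietf-l2vpn-svc:l2vpn-svc']['vpn-services']['vpn-service'][0]['vpn-id']
    url = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={:s}/'.format(vpn_id)
    service_data = do_rest_get_request(url, logger=LOGGER, expected_status_codes={200})
    return service_data['service-id']

def remove_l2vpn_service(service_uuid : str) -> None:
    # DELETE the vpn-service by UUID; the NBI is expected to reply 204 No Content
    url = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={:s}/'.format(service_uuid)
    do_rest_delete_request(url, logger=LOGGER, expected_status_codes={204})
```

In the test suite these two steps are kept in separate pytest modules so that the context_client assertions (1 service, 0 slices, SERVICETYPE_L2NM, 1 connection) can run between creation and removal; the sketch merely collapses the REST calls into two helpers for readability.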