Commit d8a114e3 authored by Lluis Gifre Renom

Tests - Ryu-OpenFlow integration test:

- Add files
parent fbf12733
2 merge requests: !359 Release TeraFlowSDN 5.0, !296 Resolve "(CTTC) Add OpenFlow support through Ryu SDN controller"
Showing 936 additions and 28 deletions
@@ -24,5 +24,6 @@ include:
- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml'
- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml'
#- local: '/src/tests/ofc25/.gitlab-ci.yml'
#- local: '/src/tests/ryu-openflow/.gitlab-ci.yml'
- local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml'
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Build, tag, and push the Docker image to the GitLab Docker registry
build ryu-openflow:
variables:
TEST_NAME: 'ryu-openflow'
stage: build
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker buildx build -t "${TEST_NAME}:latest" -f ./src/tests/${TEST_NAME}/Dockerfile .
- docker tag "${TEST_NAME}:latest" "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
- docker push "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
after_script:
- docker images --filter="dangling=true" --quiet | xargs -r docker rmi
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/tests/${TEST_NAME}/**/*.{py,in,sh,yml}
- src/tests/${TEST_NAME}/Dockerfile
- .gitlab-ci.yml
# Deploy TeraFlowSDN and execute end-to-end test
end2end_test ryu-openflow:
timeout: 90m
variables:
TEST_NAME: 'ryu-openflow'
stage: end2end_test
  # 'needs' is disabled to force this job to run after all other tasks
#needs:
# - build ryu-openflow
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- docker rm -f ${TEST_NAME} || true
- containerlab destroy --all --cleanup || true
script:
# Download Docker image to run the test
- docker pull "${CI_REGISTRY_IMAGE}/${TEST_NAME}:latest"
# Check MicroK8s is ready
- microk8s status --wait-ready
- kubectl get pods --all-namespaces
# Deploy ContainerLab Scenario
- RUNNER_PATH=`pwd`
#- cd $PWD/src/tests/${TEST_NAME}
- mkdir -p /tmp/clab/${TEST_NAME}
- cp -R src/tests/${TEST_NAME}/clab/* /tmp/clab/${TEST_NAME}
- tree -la /tmp/clab/${TEST_NAME}
- cd /tmp/clab/${TEST_NAME}
- containerlab deploy --reconfigure --topo ryu-openflow.clab.yml
- cd $RUNNER_PATH
# Wait for initialization of Device NOSes
- sleep 3
- docker ps -a
# Dump configuration of the routers (before any configuration)
- containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
# Configure TeraFlowSDN deployment
# Uncomment if DEBUG log level is needed for the components
#- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml
#- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml
#- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml
#- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml
#- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml
#- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/monitoringservice.yaml
- source src/tests/${TEST_NAME}/deploy_specs.sh
#- export TFS_REGISTRY_IMAGES="${CI_REGISTRY_IMAGE}"
#- export TFS_SKIP_BUILD="YES"
#- export TFS_IMAGE_TAG="latest"
#- echo "TFS_REGISTRY_IMAGES=${CI_REGISTRY_IMAGE}"
# Deploy TeraFlowSDN
- ./deploy/crdb.sh
- ./deploy/nats.sh
- ./deploy/kafka.sh
- ./deploy/qdb.sh
- ./deploy/tfs.sh
- ./deploy/show.sh
    ## Wait for Context to be subscribed to NATS.
    ## WARNING: without the attempt counter below, this loop would spin forever when no
    ## subscriber (such as monitoring) is deployed. LOOP_MAX_ATTEMPTS bounds the iterations,
    ## but the whole block is kept commented out for now.
#- LOOP_MAX_ATTEMPTS=180
#- LOOP_COUNTER=0
#- >
# while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do
# echo "Attempt: $LOOP_COUNTER"
# kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1;
# sleep 1;
# LOOP_COUNTER=$((LOOP_COUNTER + 1))
# if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then
# echo "Max attempts reached, exiting the loop."
# break
# fi
# done
- kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server
# Run end-to-end test: onboard scenario
- >
docker run -t --rm --name ${TEST_NAME} --network=host
--volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
--volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
$CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-onboarding.sh
# Run end-to-end test: configure service TFS
- >
docker run -t --rm --name ${TEST_NAME} --network=host
--volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
--volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
$CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh
    # Dump configuration of the routers (after configuring the TFS service)
- containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
# Run end-to-end test: test connectivity with ping
- export TEST1_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json)
- echo $TEST1_10
- echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss'
- export TEST1_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json)
- echo $TEST1_1
- echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss'
- export TEST2_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json)
- echo $TEST2_1
- echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss'
- export TEST2_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json)
- echo $TEST2_10
- echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss'
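    # NOTE: the 172.16.3.0/24 destinations below are expected to be unreachable (100% loss),
    # validating that traffic outside the configured service is not forwarded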
- export TEST3_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json)
- echo $TEST3_1
- echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss'
- export TEST3_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json)
- echo $TEST3_10
- echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss'
# Run end-to-end test: deconfigure service TFS
- >
docker run -t --rm --name ${TEST_NAME} --network=host
--volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
--volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
$CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh
    # Dump configuration of the routers (after deconfiguring the TFS service)
- containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
# Run end-to-end test: configure service IETF
- >
docker run -t --rm --name ${TEST_NAME} --network=host
--volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
--volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
$CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-create.sh
    # Dump configuration of the routers (after configuring the IETF service)
- containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
# Run end-to-end test: test connectivity with ping
- export TEST1_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json)
- echo $TEST1_10
- echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss'
- export TEST1_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json)
- echo $TEST1_1
- echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss'
- export TEST2_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json)
- echo $TEST2_1
- echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss'
- export TEST2_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json)
- echo $TEST2_10
- echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss'
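    # As above, 172.16.3.0/24 is expected to remain unreachable (100% loss)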
- export TEST3_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json)
- echo $TEST3_1
- echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss'
- export TEST3_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json)
- echo $TEST3_10
- echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss'
# Run end-to-end test: deconfigure service IETF
- >
docker run -t --rm --name ${TEST_NAME} --network=host
--volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
--volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
$CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-remove.sh
    # Dump configuration of the routers (after deconfiguring the IETF service)
- containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
# Run end-to-end test: cleanup scenario
- >
docker run -t --rm --name ${TEST_NAME} --network=host
--volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh"
--volume "$PWD/src/tests/${TEST_NAME}:/opt/results"
$CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-cleanup.sh
after_script:
    # Dump configuration of the routers (in after_script)
- containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
- containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\""
# Dump TeraFlowSDN component logs
- source src/tests/${TEST_NAME}/deploy_specs.sh
- kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server
- kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/deviceservice -c server
- kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/pathcompservice -c frontend
- kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/serviceservice -c server
- kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/nbiservice -c server
#- kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice -c server
# Destroy Scenario
- docker rm -f ${TEST_NAME} || true
- RUNNER_PATH=`pwd`
#- cd $PWD/src/tests/${TEST_NAME}
- cd /tmp/clab/${TEST_NAME}
- containerlab destroy --topo ryu-openflow.clab.yml --cleanup || true
- sudo rm -rf clab-ryu-openflow/ .ryu-openflow.clab.yml.bak || true
- cd $RUNNER_PATH
- kubectl delete namespaces tfs || true
# Clean old docker images
- docker images --filter="dangling=true" --quiet | xargs -r docker rmi
#coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
artifacts:
when: always
reports:
junit: ./src/tests/${TEST_NAME}/report_*.xml
# Control of OpenFlow domain through Ryu SDN controller and TeraFlowSDN
## TeraFlowSDN Deployment
```bash
cd ~/tfs-ctrl
source ~/tfs-ctrl/src/tests/ryu-openflow/deploy_specs.sh
./deploy/all.sh
```
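To verify the deployment, check that all pods in the `tfs` namespace (the name set by `TFS_K8S_NAMESPACE` in `deploy_specs.sh`) reach the `Running` state; a minimal sketch:
```bash
kubectl get pods --namespace tfs
# every component listed in TFS_COMPONENTS should eventually show STATUS=Running
```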
## Download and install Mininet
```bash
sudo apt-get install "mininet=2.3.0-1ubuntu1"
```
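Optionally, a quick sanity check of the installation (`mn --test pingall` spins up a throwaway topology and pings across it):
```bash
mn --version              # should print 2.3.0
sudo mn --test pingall    # built-in smoke test; all pings should succeed
```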
## Deploy SDN controller and dataplane
```bash
cd ~/tfs-ctrl/src/tests/ryu-openflow/
docker compose build # or docker buildx build --no-cache -t "ryu-image:dev" -f ./Ryu.Dockerfile .
docker compose up -d # or docker run -d -p 6653:6653 -p 8080:8080 ryu-image:dev
sudo python3 custom_pentagon_topology.py
```
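Once the controller is up, you can confirm the switches connected by querying Ryu's REST API (a sketch, assuming the container runs `ryu.app.ofctl_rest` on the default port 8080):
```bash
# returns the datapath IDs of all connected OpenFlow switches
curl http://127.0.0.1:8080/stats/switches
```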
## Destroy scenario
```bash
cd ~/tfs-ctrl/src/tests/ryu-openflow/
docker compose down
# stop the Mininet dataplane with Ctrl+C in its terminal
```
## Onboard scenario
- Through the TFS WebUI (or scripted, as in the CI job; see the sketch below)
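A minimal scripted alternative, mirroring the CI job above (assumes the test image was built locally as `ryu-openflow:latest` and TFS is already deployed):
```bash
cd ~/tfs-ctrl
docker run -t --rm --name ryu-openflow --network=host \
  --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" \
  --volume "$PWD/src/tests/ryu-openflow:/opt/results" \
  ryu-openflow:latest /var/teraflow/run-onboarding.sh
```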
## Request connectivity service
```bash
cd ~/tfs-ctrl/src/tests/ryu-openflow/
curl -X POST \
--header "Content-Type: application/json" \
--data @ietf-l3vpn-service.json \
--user "admin:admin" \
http://127.0.0.1/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services
```
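To verify the service was created, read back the same RESTCONF collection; removal is a DELETE on the specific entry (`<vpn-id>` is a placeholder for the id declared in `ietf-l3vpn-service.json`):
```bash
# verify: list the configured vpn-services
curl --user "admin:admin" \
  http://127.0.0.1/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services

# remove a service (placeholder id)
curl -X DELETE --user "admin:admin" \
  "http://127.0.0.1/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service=<vpn-id>"
```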
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:3.9-slim
# Install dependencies
RUN apt-get --yes --quiet --quiet update && \
apt-get --yes --quiet --quiet install wget g++ git && \
rm -rf /var/lib/apt/lists/*
# Set Python to show logs as they occur (any non-empty value enables unbuffered output)
ENV PYTHONUNBUFFERED=1
# Get generic Python packages
RUN python3 -m pip install --upgrade pip
RUN python3 -m pip install --upgrade setuptools wheel
RUN python3 -m pip install --upgrade pip-tools
# Get common Python packages
# Note: this step enables sharing the previous Docker build steps among all the Python components
WORKDIR /var/teraflow
COPY common_requirements.in common_requirements.in
RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
RUN python3 -m pip install -r common_requirements.txt
# Add common files into working directory
WORKDIR /var/teraflow/common
COPY src/common/. ./
RUN rm -rf proto
# Create proto sub-folder, copy .proto files, and generate Python code
RUN mkdir -p /var/teraflow/common/proto
WORKDIR /var/teraflow/common/proto
RUN touch __init__.py
COPY proto/*.proto ./
RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
RUN rm *.proto
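# Rewrite generated absolute imports (e.g., 'import context_pb2') into relative imports
# ('from . import context_pb2') so the generated modules resolve inside the proto package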
RUN find . -type f -exec sed -i -E 's/^(import\ .*)_pb2/from . \1_pb2/g' {} \;
# Create component sub-folders, get specific Python packages
RUN mkdir -p /var/teraflow/tests/ryu-openflow
WORKDIR /var/teraflow/tests/ryu-openflow
COPY src/tests/ryu-openflow/requirements.in requirements.in
RUN pip-compile --quiet --output-file=requirements.txt requirements.in
RUN python3 -m pip install -r requirements.txt
# Add component files into working directory
WORKDIR /var/teraflow
COPY src/__init__.py ./__init__.py
COPY src/common/*.py ./common/
COPY src/common/tests/. ./common/tests/
COPY src/common/tools/. ./common/tools/
COPY src/context/__init__.py context/__init__.py
COPY src/context/client/. context/client/
COPY src/device/__init__.py device/__init__.py
COPY src/device/client/. device/client/
COPY src/monitoring/__init__.py monitoring/__init__.py
COPY src/monitoring/client/. monitoring/client/
COPY src/service/__init__.py service/__init__.py
COPY src/service/client/. service/client/
COPY src/slice/__init__.py slice/__init__.py
COPY src/slice/client/. slice/client/
COPY src/vnt_manager/__init__.py vnt_manager/__init__.py
COPY src/vnt_manager/client/. vnt_manager/client/
COPY src/tests/*.py ./tests/
COPY src/tests/ryu-openflow/__init__.py ./tests/ryu-openflow/__init__.py
COPY src/tests/ryu-openflow/data/. ./tests/ryu-openflow/data/
COPY src/tests/ryu-openflow/tests/. ./tests/ryu-openflow/tests/
COPY src/tests/ryu-openflow/scripts/. ./
RUN apt-get --yes --quiet --quiet update && \
apt-get --yes --quiet --quiet install tree && \
rm -rf /var/lib/apt/lists/*
RUN tree -la /var/teraflow
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sudo mn -c
ryu-manager --observe-links ryu.app.ofctl_rest ryu.app.gui_topology.gui_topology
sudo python3 ~/tfs-ctrl/src/tests/ryu-openflow/custom_pentagon_topology.py
curl -X POST \
--header "Content-Type: application/json" \
--data @ietf-l3vpn-service.json \
--user "admin:admin" \
http://127.0.0.1/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services
4. Build and run
   # from ryu-docker/
   docker compose build    # or: docker build -t ryu-local .
   docker compose up -d    # or: docker run -d --network host ryu-local
   You should now see the usual Ryu banner in the output of docker compose logs -f.

5. Start Mininet on the host
   sudo mn --topo single,3 \
       --controller remote,ip=127.0.0.1,port=6653 \
       --switch ovs,protocols=OpenFlow13
   127.0.0.1 works because the container runs in host network mode.
   If you prefer normal bridge networking, publish the ports instead:
   docker run -d -p 6653:6653 -p 8080:8080 ryu-local
   # ...then point Mininet at the host: --controller remote,ip=<host-IP>,port=6653
   Verify the switch talks OpenFlow 1.3 (protocols=OpenFlow13) unless your app requires 1.0.
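   To double-check which OpenFlow versions the switch negotiated (assuming Mininet's default bridge name s1):
   sudo ovs-vsctl get bridge s1 protocols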
#!/bin/bash
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----- TeraFlowSDN ------------------------------------------------------------
# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
#export TFS_COMPONENTS="context device pathcomp service slice nbi webui"
export TFS_COMPONENTS="context device pathcomp service nbi"
# Uncomment to activate Monitoring (old)
#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
# Uncomment to activate Monitoring Framework (new)
#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation"
# Uncomment to activate QoS Profiles
#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile"
# Uncomment to activate BGP-LS Speaker
#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
# Uncomment to activate Optical Controller
# To manage optical connections, "service" requires "opticalcontroller" to be deployed
# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it.
#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
# BEFORE="${TFS_COMPONENTS% service*}"
# AFTER="${TFS_COMPONENTS#* service}"
# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}"
#fi
# Uncomment to activate ZTP
#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
# Uncomment to activate Policy Manager
#export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
# Uncomment to activate Optical CyberSecurity
#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
# Uncomment to activate L3 CyberSecurity
#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
# Uncomment to activate TE
#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
# Uncomment to activate Forecaster
#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster"
# Uncomment to activate E2E Orchestrator
#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator"
# Uncomment to activate VNT Manager
#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager"
# Uncomment to activate DLT and Interdomain
#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt"
#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then
# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk"
# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem"
# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt"
#fi
# Uncomment to activate QKD App
# To manage QKD Apps, "service" requires "qkd_app" to be deployed
# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it.
#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
# BEFORE="${TFS_COMPONENTS% service*}"
# AFTER="${TFS_COMPONENTS#* service}"
# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
#fi
# Uncomment to activate Load Generator
#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
# Set the name of the Kubernetes namespace to deploy TFS to.
export TFS_K8S_NAMESPACE="tfs"
# Set additional manifest files to be applied after the deployment
export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
# Uncomment to monitor performance of components
#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
# Uncomment when deploying Optical CyberSecurity
#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
# Set the new Grafana admin password
export TFS_GRAFANA_PASSWORD="admin123+"
# Disable skip-build flag to rebuild the Docker images.
export TFS_SKIP_BUILD=""
# ----- CockroachDB ------------------------------------------------------------
# Set the namespace where CockroachDB will be deployed.
export CRDB_NAMESPACE="crdb"
# Set the external port CockroachDB PostgreSQL interface will be exposed to.
export CRDB_EXT_PORT_SQL="26257"
# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
export CRDB_EXT_PORT_HTTP="8081"
# Set the database username to be used by Context.
export CRDB_USERNAME="tfs"
# Set the database user's password to be used by Context.
export CRDB_PASSWORD="tfs123"
# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
export CRDB_DEPLOY_MODE="single"
# Enable flag for dropping the database, if it exists.
export CRDB_DROP_DATABASE_IF_EXISTS="YES"
# Disable flag for re-deploying CockroachDB from scratch.
export CRDB_REDEPLOY=""
# ----- NATS -------------------------------------------------------------------
# Set the namespace where NATS will be deployed.
export NATS_NAMESPACE="nats"
# Set the external port NATS Client interface will be exposed to.
export NATS_EXT_PORT_CLIENT="4222"
# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
export NATS_EXT_PORT_HTTP="8222"
# Set NATS installation mode to 'single'. This option is convenient for development and testing.
# See ./deploy/all.sh or ./deploy/nats.sh for additional details
export NATS_DEPLOY_MODE="single"
# Disable flag for re-deploying NATS from scratch.
export NATS_REDEPLOY=""
# ----- QuestDB ----------------------------------------------------------------
# Set the namespace where QuestDB will be deployed.
export QDB_NAMESPACE="qdb"
# Set the external port QuestDB PostgreSQL interface will be exposed to.
export QDB_EXT_PORT_SQL="8812"
# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
export QDB_EXT_PORT_ILP="9009"
# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
export QDB_EXT_PORT_HTTP="9000"
# Set the database username to be used for QuestDB.
export QDB_USERNAME="admin"
# Set the database user's password to be used for QuestDB.
export QDB_PASSWORD="quest"
# Set the table name to be used by Monitoring for KPIs.
export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
# Set the table name to be used by Slice for plotting groups.
export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
# Enable flag for dropping tables if they exist.
export QDB_DROP_TABLES_IF_EXIST="YES"
# Disable flag for re-deploying QuestDB from scratch.
export QDB_REDEPLOY=""
# ----- K8s Observability ------------------------------------------------------
# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
export PROM_EXT_PORT_HTTP="9090"
# Set the external port Grafana HTTP Dashboards will be exposed to.
export GRAF_EXT_PORT_HTTP="3000"
# ----- Apache Kafka -----------------------------------------------------------
# Set the namespace where Apache Kafka will be deployed.
export KFK_NAMESPACE="kafka"
# Set the port Apache Kafka server will be exposed to.
export KFK_SERVER_PORT="9092"
# Set the flag to YES to redeploy Apache Kafka.
export KFK_REDEPLOY=""
#!/bin/bash
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source ~/tfs-ctrl/src/tests/ryu-openflow/deploy_specs.sh
./deploy/all.sh
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
requests==2.27.*
#!/bin/bash
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source /var/teraflow/tfs_runtime_env_vars.sh
export PYTHONPATH=/var/teraflow
pytest --verbose --log-level=INFO \
--junitxml=/opt/results/report_cleanup.xml \
/var/teraflow/tests/ryu-openflow/tests/test_cleanup.py
#!/bin/bash
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source /var/teraflow/tfs_runtime_env_vars.sh
export PYTHONPATH=/var/teraflow
pytest --verbose --log-level=INFO \
--junitxml=/opt/results/report_onboarding.xml \
/var/teraflow/tests/ryu-openflow/tests/test_onboarding.py
#!/bin/bash
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source /var/teraflow/tfs_runtime_env_vars.sh
export PYTHONPATH=/var/teraflow
pytest --verbose --log-level=INFO \
--junitxml=/opt/results/report_service_ietf_create.xml \
/var/teraflow/tests/ryu-openflow/tests/test_service_ietf_create.py
#!/bin/bash
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source /var/teraflow/tfs_runtime_env_vars.sh
export PYTHONPATH=/var/teraflow
pytest --verbose --log-level=INFO \
--junitxml=/opt/results/report_service_ietf_remove.xml \
/var/teraflow/tests/ryu-openflow/tests/test_service_ietf_remove.py
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from monitoring.client.MonitoringClient import MonitoringClient
from service.client.ServiceClient import ServiceClient
@pytest.fixture(scope='session')
def context_client():
    _client = ContextClient()
    yield _client
    _client.close()


@pytest.fixture(scope='session')
def device_client():
    _client = DeviceClient()
    yield _client
    _client.close()


@pytest.fixture(scope='session')
def monitoring_client():
    _client = MonitoringClient()
    yield _client
    _client.close()


@pytest.fixture(scope='session')
def service_client():
    _client = ServiceClient()
    yield _client
    _client.close()
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum, logging, requests
from typing import Any, Dict, List, Optional, Set, Union
from common.Constants import ServiceNameEnum
from common.Settings import get_service_host, get_service_port_http
NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI)
NBI_PORT = get_service_port_http(ServiceNameEnum.NBI)
NBI_USERNAME = 'admin'
NBI_PASSWORD = 'admin'
NBI_BASE_URL = ''
class RestRequestMethod(enum.Enum):
GET = 'get'
POST = 'post'
PUT = 'put'
PATCH = 'patch'
DELETE = 'delete'
EXPECTED_STATUS_CODES : Set[int] = {
requests.codes['OK' ],
requests.codes['CREATED' ],
requests.codes['ACCEPTED' ],
requests.codes['NO_CONTENT'],
}
def do_rest_request(
method : RestRequestMethod, url : str, body : Optional[Any] = None, timeout : int = 10,
allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
logger : Optional[logging.Logger] = None
) -> Optional[Union[Dict, List]]:
request_url = 'http://{:s}:{:s}@{:s}:{:d}{:s}{:s}'.format(
NBI_USERNAME, NBI_PASSWORD, NBI_ADDRESS, NBI_PORT, str(NBI_BASE_URL), url
)
if logger is not None:
msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url))
if body is not None: msg += ' body={:s}'.format(str(body))
logger.warning(msg)
reply = requests.request(method.value, request_url, timeout=timeout, json=body, allow_redirects=allow_redirects)
if logger is not None:
logger.warning('Reply: {:s}'.format(str(reply.text)))
assert reply.status_code in expected_status_codes, 'Reply failed with status code {:d}'.format(reply.status_code)
if reply.content and len(reply.content) > 0: return reply.json()
return None
def do_rest_get_request(
url : str, body : Optional[Any] = None, timeout : int = 10,
allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
logger : Optional[logging.Logger] = None
) -> Optional[Union[Dict, List]]:
return do_rest_request(
RestRequestMethod.GET, url, body=body, timeout=timeout, allow_redirects=allow_redirects,
expected_status_codes=expected_status_codes, logger=logger
)
def do_rest_post_request(
url : str, body : Optional[Any] = None, timeout : int = 10,
allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
logger : Optional[logging.Logger] = None
) -> Optional[Union[Dict, List]]:
return do_rest_request(
RestRequestMethod.POST, url, body=body, timeout=timeout, allow_redirects=allow_redirects,
expected_status_codes=expected_status_codes, logger=logger
)
def do_rest_put_request(
url : str, body : Optional[Any] = None, timeout : int = 10,
allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
logger : Optional[logging.Logger] = None
) -> Optional[Union[Dict, List]]:
return do_rest_request(
RestRequestMethod.PUT, url, body=body, timeout=timeout, allow_redirects=allow_redirects,
expected_status_codes=expected_status_codes, logger=logger
)
def do_rest_patch_request(
url : str, body : Optional[Any] = None, timeout : int = 10,
allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
logger : Optional[logging.Logger] = None
) -> Optional[Union[Dict, List]]:
return do_rest_request(
RestRequestMethod.PATCH, url, body=body, timeout=timeout, allow_redirects=allow_redirects,
expected_status_codes=expected_status_codes, logger=logger
)
def do_rest_delete_request(
url : str, body : Optional[Any] = None, timeout : int = 10,
allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES,
logger : Optional[logging.Logger] = None
) -> Optional[Union[Dict, List]]:
return do_rest_request(
RestRequestMethod.DELETE, url, body=body, timeout=timeout, allow_redirects=allow_redirects,
expected_status_codes=expected_status_codes, logger=logger
)
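# Hypothetical usage sketch (endpoint path taken from the IETF L3VPN test; body elided):
#   import logging
#   LOGGER = logging.getLogger(__name__)
#   reply = do_rest_post_request(
#       '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services',
#       body={...}, logger=LOGGER)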
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging, os
from common.Constants import DEFAULT_CONTEXT_NAME
from common.proto.context_pb2 import ContextId
from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario
from common.tools.object_factory.Context import json_context_id
from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from .Fixtures import context_client, device_client # pylint: disable=unused-import
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json')
ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
def test_scenario_cleanup(
context_client : ContextClient, # pylint: disable=redefined-outer-name
device_client : DeviceClient, # pylint: disable=redefined-outer-name
) -> None:
# Verify the scenario has no services/slices
response = context_client.GetContext(ADMIN_CONTEXT_ID)
assert len(response.service_ids) == 0
assert len(response.slice_ids) == 0
# Load descriptors and validate the base scenario
descriptor_loader = DescriptorLoader(
descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
descriptor_loader.validate()
descriptor_loader.unload()
validate_empty_scenario(context_client)