
Compare revisions

Changes are shown as if the source revision were being merged into the target revision.

Target
  • tfs/controller
Commits on Source (27)
Showing 693 additions and 311 deletions
......@@ -16,44 +16,46 @@
stages:
#- dependencies
- build
- prepare
- unit_test
- end2end_test
# include the individual .gitlab-ci.yml of each micro-service and tests
include:
#- local: '/manifests/.gitlab-ci.yml'
- local: '/src/monitoring/.gitlab-ci.yml'
- local: '/src/nbi/.gitlab-ci.yml'
- local: '/src/context/.gitlab-ci.yml'
#- local: '/src/monitoring/.gitlab-ci.yml'
#- local: '/src/nbi/.gitlab-ci.yml'
#- local: '/src/context/.gitlab-ci.yml'
- local: '/src/device/.gitlab-ci.yml'
- local: '/src/service/.gitlab-ci.yml'
- local: '/src/dbscanserving/.gitlab-ci.yml'
- local: '/src/opticalattackmitigator/.gitlab-ci.yml'
- local: '/src/opticalattackdetector/.gitlab-ci.yml'
- local: '/src/opticalattackmanager/.gitlab-ci.yml'
- local: '/src/opticalcontroller/.gitlab-ci.yml'
- local: '/src/ztp/.gitlab-ci.yml'
- local: '/src/policy/.gitlab-ci.yml'
- local: '/src/automation/.gitlab-ci.yml'
- local: '/src/forecaster/.gitlab-ci.yml'
- local: '/src/qkd_app/.gitlab-ci.yml'
#- local: '/src/dbscanserving/.gitlab-ci.yml'
#- local: '/src/opticalattackmitigator/.gitlab-ci.yml'
#- local: '/src/opticalattackdetector/.gitlab-ci.yml'
#- local: '/src/opticalattackmanager/.gitlab-ci.yml'
#- local: '/src/opticalcontroller/.gitlab-ci.yml'
#- local: '/src/ztp/.gitlab-ci.yml'
#- local: '/src/policy/.gitlab-ci.yml'
#- local: '/src/automation/.gitlab-ci.yml'
#- local: '/src/forecaster/.gitlab-ci.yml'
#- local: '/src/webui/.gitlab-ci.yml'
#- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml'
#- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml'
#- local: '/src/l3_attackmitigator/.gitlab-ci.yml'
- local: '/src/slice/.gitlab-ci.yml'
#- local: '/src/slice/.gitlab-ci.yml'
#- local: '/src/interdomain/.gitlab-ci.yml'
- local: '/src/pathcomp/.gitlab-ci.yml'
#- local: '/src/pathcomp/.gitlab-ci.yml'
#- local: '/src/dlt/.gitlab-ci.yml'
- local: '/src/load_generator/.gitlab-ci.yml'
- local: '/src/bgpls_speaker/.gitlab-ci.yml'
- local: '/src/kpi_manager/.gitlab-ci.yml'
- local: '/src/kpi_value_api/.gitlab-ci.yml'
- local: '/src/kpi_value_writer/.gitlab-ci.yml'
- local: '/src/telemetry/.gitlab-ci.yml'
- local: '/src/analytics/.gitlab-ci.yml'
- local: '/src/qos_profile/.gitlab-ci.yml'
- local: '/src/vnt_manager/.gitlab-ci.yml'
- local: '/src/e2e_orchestrator/.gitlab-ci.yml'
#- local: '/src/load_generator/.gitlab-ci.yml'
#- local: '/src/bgpls_speaker/.gitlab-ci.yml'
#- local: '/src/kpi_manager/.gitlab-ci.yml'
#- local: '/src/kpi_value_api/.gitlab-ci.yml'
#- local: '/src/kpi_value_writer/.gitlab-ci.yml'
#- local: '/src/telemetry/.gitlab-ci.yml'
#- local: '/src/analytics/.gitlab-ci.yml'
#- local: '/src/qos_profile/.gitlab-ci.yml'
#- local: '/src/vnt_manager/.gitlab-ci.yml'
#- local: '/src/e2e_orchestrator/.gitlab-ci.yml'
# This should be last one: end-to-end integration tests
- local: '/src/tests/.gitlab-ci.yml'
......@@ -80,11 +80,11 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui"
# To manage QKD Apps, "service" requires "qkd_app" to be deployed
# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the
# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it.
#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
# BEFORE="${TFS_COMPONENTS% service*}"
# AFTER="${TFS_COMPONENTS#* service}"
# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
#fi
if [[ "$TFS_COMPONENTS" == *"service"* ]]; then
BEFORE="${TFS_COMPONENTS% service*}"
AFTER="${TFS_COMPONENTS#* service}"
export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
fi
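# For illustration only (not part of the original script): assuming the value exported
# above, TFS_COMPONENTS="context device pathcomp service slice nbi webui", the block
# re-exports it with "qkd_app" inserted immediately before "service", yielding (up to
# spacing) "context device pathcomp qkd_app service slice nbi webui".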
# Uncomment to activate Load Generator
#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator"
......@@ -137,7 +137,7 @@ export CRDB_DEPLOY_MODE="single"
export CRDB_DROP_DATABASE_IF_EXISTS=""
# Disable flag for re-deploying CockroachDB from scratch.
export CRDB_REDEPLOY=""
export CRDB_REDEPLOY="YES"
# ----- NATS -------------------------------------------------------------------
......
......@@ -12,11 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
stages:
- build
- prepare
- unit_test
# Build, tag, and push the Docker image to the GitLab Docker registry
build device:
build_device:
variables:
IMAGE_NAME: 'device' # name of the microservice
IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
IMAGE_NAME: 'device'
IMAGE_TAG: 'latest'
stage: build
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
......@@ -30,113 +35,136 @@ build device:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
- src/common/**/*.py
- proto/*.proto
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
## Start Mock QKD Nodes before unit testing
#start_mock_nodes:
# stage: deploy
# script:
# - bash src/tests/tools/mock_qkd_nodes/start.sh &
# - sleep 10 # wait for nodes to spin up
# artifacts:
# paths:
# - mock_nodes.log
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
# Deploy mock QKD nodes
prepare_mock_qkd_nodes:
stage: prepare
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- if docker network list | grep teraflowbridge; then echo "teraflowbridge network is already created"; else docker network create --driver=bridge teraflowbridge; fi
- |
# Context-related cleanup
if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi
## Prepare Scenario (Start NBI, mock services)
#prepare_scenario:
# stage: deploy
# script:
# - pytest src/tests/qkd/unit/PrepareScenario.py
# needs:
# - start_mock_nodes
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
script:
- docker volume create crdb
- docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 cockroachdb/cockroach:latest-v22.2 start-single-node
- docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 nats:2.9 --http_port 8222 --user tfs --pass tfs123
- echo "Waiting for initialization..."
- while ! docker logs crdb 2>&1 | grep -q 'finished creating default user "tfs"'; do sleep 1; done
- while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
- MOCK_NODES_DIR="$CI_PROJECT_DIR/controller/src/tests/tools/mock_qkd_nodes"
- |
if [ -d "$MOCK_NODES_DIR" ]; then
cd "$MOCK_NODES_DIR" && ./start.sh &
MOCK_NODES_PID=$!
else
echo "Error: Mock QKD nodes directory '$MOCK_NODES_DIR' not found." && exit 1;
fi
- echo "Waiting for mock nodes to be up..."
- RETRY_COUNT=0
- MAX_RETRIES=15
- |
while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
if curl -s http://127.0.0.1:11111 > /dev/null && \
curl -s http://127.0.0.1:22222 > /dev/null && \
curl -s http://127.0.0.1:33333 > /dev/null; then
echo "Mock nodes are up!"
break
else
echo "Mock nodes not ready, retrying in 5 seconds..."
RETRY_COUNT=$((RETRY_COUNT + 1))
sleep 5
fi
done
- |
if [ $RETRY_COUNT -ge $MAX_RETRIES ]; then
echo "Error: Mock nodes failed to start after multiple attempts."
exit 1
fi
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/device/**/*.{py,in,yml}
- src/device/Dockerfile
- src/device/tests/*.py
- src/tests/tools/mock_qkd_nodes/**
- .gitlab-ci.yml
# Apply unit test to the component
unit_test device:
unit_test_device:
variables:
IMAGE_NAME: 'device' # name of the microservice
IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
IMAGE_NAME: 'device'
IMAGE_TAG: 'latest'
stage: unit_test
needs:
- build device
#- start_mock_nodes
#- prepare_scenario
- build_device
- prepare_mock_qkd_nodes
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- >
if docker network list | grep teraflowbridge; then
echo "teraflowbridge is already created";
else
docker network create -d bridge teraflowbridge;
fi
- >
if docker container ls | grep $IMAGE_NAME; then
docker rm -f $IMAGE_NAME;
else
echo "$IMAGE_NAME image is not in the system";
fi
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker run --name $IMAGE_NAME -d -p 2020:2020 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- docker run --name $IMAGE_NAME --network=teraflowbridge -d -p 2020:2020 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" -e PYTHONPATH="/var/teraflow:/var/teraflow/device:/var/teraflow/tests/tools/mock_qkd_nodes:/var/teraflow/tests" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- sleep 5
- docker ps -a
- docker logs $IMAGE_NAME
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary_emulated.py --junitxml=/opt/results/${IMAGE_NAME}_report_emulated.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary_ietf_actn.py --junitxml=/opt/results/${IMAGE_NAME}_report_ietf_actn.xml"
#- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/qkd/unit/test_*.py"
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose device/tests/qkd/unit/test_qkd_mock_connectivity.py"
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose device/tests/qkd/unit/test_qkd_compliance.py"
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose device/tests/qkd/unit/test_mock_qkd_node.py"
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose device/tests/qkd/unit/test_qkd_error_handling.py"
- docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
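# Illustration (not in the original file): the regex above captures the percentage from the
# summary line printed by "coverage report", e.g. a line of the form "TOTAL    1375     64    95%".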
after_script:
- docker rm -f $IMAGE_NAME
- docker rm -f pathcomp-frontend pathcomp-backend device context crdb nats
- docker volume rm -f crdb
- docker network rm teraflowbridge
- docker volume prune --force
- docker image prune --force
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- src/$IMAGE_NAME/tests/Dockerfile
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
- src/common/**/*.py
- proto/*.proto
- src/$IMAGE_NAME/**/*.{py,in,yml}
- src/$IMAGE_NAME/Dockerfile
- src/$IMAGE_NAME/tests/*.py
- src/$IMAGE_NAME/tests/Dockerfile
- src/tests/tools/mock_qkd_nodes/**
- manifests/${IMAGE_NAME}service.yaml
- .gitlab-ci.yml
artifacts:
when: always
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report_*.xml
when: always
reports:
junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report_*.xml
## Deployment of the service in Kubernetes Cluster
#deploy device:
#deploy_device:
# variables:
# IMAGE_NAME: 'device' # name of the microservice
# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
# stage: deploy
# needs:
# - unit test device
# # - integ_test execute
# - unit_test_device
# script:
# - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
# - kubectl version
# - kubectl get all
# - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
# - kubectl get all
# # environment:
# # name: test
# # url: https://example.com
# # kubernetes:
# # namespace: test
# rules:
# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
# when: manual
......
......@@ -16,16 +16,20 @@ import pytest
import requests
from requests.exceptions import ConnectionError
MOCK_QKD_ADDRESS = '127.0.0.1'
MOCK_PORT = 11111
def test_mock_qkd_node_responses():
response = requests.get('http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node')
response = requests.get(f'http://{MOCK_QKD_ADDRESS}:{MOCK_PORT}/restconf/data/etsi-qkd-sdn-node:qkd_node')
assert response.status_code == 200
data = response.json()
assert 'qkd_node' in data
def test_mock_node_failure_scenarios():
try:
response = requests.get('http://127.0.0.1:12345/restconf/data/etsi-qkd-sdn-node:qkd_node')
response = requests.get(f'http://{MOCK_QKD_ADDRESS}:12345/restconf/data/etsi-qkd-sdn-node:qkd_node')
except ConnectionError as e:
assert isinstance(e, ConnectionError)
else:
pytest.fail("ConnectionError not raised as expected")
pytest.fail("ConnectionError not raised as expected")
\ No newline at end of file
......@@ -15,10 +15,18 @@
import pytest
import requests
from requests.exceptions import HTTPError
from tests.tools.mock_qkd_nodes.YangValidator import YangValidator
def test_compliance_with_yang_models():
validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types'])
response = requests.get('http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node')
data = response.json()
assert validator.parse_to_dict(data) is not None
try:
response = requests.get('http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node')
response.raise_for_status()
data = response.json()
assert validator.parse_to_dict(data) is not None, "Data validation failed against YANG model."
except HTTPError as e:
pytest.fail(f"HTTP error occurred: {e}")
except Exception as e:
pytest.fail(f"Unexpected error occurred: {e}")
......@@ -40,7 +40,7 @@ def test_invalid_operations_on_network_links(qkd_driver):
try:
# Attempt to perform an invalid operation (simulate wrong resource key)
response = requests.post(f'http://{qkd_driver.address}/invalid_resource', json=invalid_payload)
response = requests.post(f'http://{qkd_driver.address}:{qkd_driver.port}/invalid_resource', json=invalid_payload)
response.raise_for_status()
except HTTPError as e:
......
......@@ -12,16 +12,35 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest, requests
import pytest
import requests
import time
import socket
from unittest.mock import patch
from device.service.drivers.qkd.QKDDriver import QKDDriver
from device.service.drivers.qkd.QKDDriver2 import QKDDriver
MOCK_QKD_ADDRRESS = '127.0.0.1'
MOCK_QKD_ADDRESS = '127.0.0.1' # Use localhost to connect to the mock node in the Docker container
MOCK_PORT = 11111
@pytest.fixture(scope="module")
def wait_for_mock_node():
"""
Fixture to wait for the mock QKD node to be ready before running tests.
"""
timeout = 30 # seconds
start_time = time.time()
while True:
try:
with socket.create_connection((MOCK_QKD_ADDRESS, MOCK_PORT), timeout=1):
break # Success
except (socket.timeout, socket.error):
if time.time() - start_time > timeout:
raise RuntimeError("Timed out waiting for mock QKD node to be ready.")
time.sleep(1)
@pytest.fixture
def qkd_driver():
return QKDDriver(address=MOCK_QKD_ADDRRESS, port=MOCK_PORT, username='user', password='pass')
def qkd_driver(wait_for_mock_node):
return QKDDriver(address=MOCK_QKD_ADDRESS, port=MOCK_PORT, username='user', password='pass')
# Deliverable Test ID: SBI_Test_01
def test_qkd_driver_connection(qkd_driver):
......@@ -29,7 +48,7 @@ def test_qkd_driver_connection(qkd_driver):
# Deliverable Test ID: SBI_Test_01
def test_qkd_driver_invalid_connection():
qkd_driver = QKDDriver(address='127.0.0.1', port=12345, username='user', password='pass') # Use invalid port directly
qkd_driver = QKDDriver(address=MOCK_QKD_ADDRESS, port=12345, username='user', password='pass') # Use invalid port directly
assert qkd_driver.Connect() is False
# Deliverable Test ID: SBI_Test_10
......@@ -38,4 +57,3 @@ def test_qkd_driver_timeout_connection(mock_get, qkd_driver):
mock_get.side_effect = requests.exceptions.Timeout
qkd_driver.timeout = 0.001 # Simulate very short timeout
assert qkd_driver.Connect() is False
......@@ -58,6 +58,50 @@ unit_test app:
- docker logs $IMAGE_NAME
- docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
# Mock QKD Nodes Deployment
- |
echo "Starting stage: deploy_mock_nodes"
- pip install flask # Install Flask to ensure it is available
- |
for port in 11111 22222 33333; do
if lsof -i:$port >/dev/null 2>&1; then
echo "Freeing up port $port..."
fuser -k $port/tcp
fi
done
MOCK_NODES_DIR="$PWD/src/tests/tools/mock_qkd_nodes"
if [ -d "$MOCK_NODES_DIR" ]; then
cd "$MOCK_NODES_DIR" || exit
./start.sh &
MOCK_NODES_PID=$!
else
echo "Error: Mock QKD nodes directory '$MOCK_NODES_DIR' not found."
exit 1
fi
echo "Waiting for mock nodes to be up..."
RETRY_COUNT=0
MAX_RETRIES=15
while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
if curl -s http://127.0.0.1:11111 > /dev/null && \
curl -s http://127.0.0.1:22222 > /dev/null && \
curl -s http://127.0.0.1:33333 > /dev/null; then
echo "Mock nodes are up!"
break
else
echo "Mock nodes not ready, retrying in 5 seconds..."
RETRY_COUNT=$((RETRY_COUNT + 1))
sleep 5
fi
done
if [ $RETRY_COUNT -ge $MAX_RETRIES ]; then
echo "Error: Mock nodes failed to start after multiple attempts."
exit 1
fi
# Run additional QKD unit tests
- docker exec -i $IMAGE_NAME bash -c "pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_create_apps.py"
- docker exec -i $IMAGE_NAME bash -c "pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_Set_new_configuration.py"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
- docker rm -f $IMAGE_NAME
......
......@@ -14,7 +14,7 @@
import requests
QKD_ADDRESS = '10.0.2.10'
QKD_ADDRESS = '127.0.0.1'
QKD_URL = 'http://{:s}/qkd_app/create_qkd_app'.format(QKD_ADDRESS)
QKD_REQUEST_1 = {
......
......@@ -53,7 +53,7 @@ def create_qkd_app(driver, qkdn_id, backing_qkdl_id, client_app_id=None):
print(f"Sending payload to {driver.address}: {app_payload}")
# Send POST request to create the application
response = requests.post(f'http://{driver.address}/app/create_qkd_app', json=app_payload)
response = requests.post(f'http://{driver.address}/qkd_app/create_qkd_app', json=app_payload)
# Check if the request was successful (HTTP 2xx)
response.raise_for_status()
......
......@@ -49,22 +49,22 @@ unit_test service:
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi
- |
# Context-related cleanup
if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi
# Context-related
- if docker container ls | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
- if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
- if docker container ls | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi
# Device-related cleanup
if docker container ls | grep context; then docker rm -f context; else echo "context image is not in the system"; fi
if docker container ls | grep device; then docker rm -f device; else echo "device image is not in the system"; fi
# Device-related
- if docker container ls | grep context; then docker rm -f context; else echo "context image is not in the system"; fi
- if docker container ls | grep device; then docker rm -f device; else echo "device image is not in the system"; fi
# Pathcomp-related cleanup
if docker container ls | grep pathcomp-frontend; then docker rm -f pathcomp-frontend; else echo "pathcomp-frontend image is not in the system"; fi
if docker container ls | grep pathcomp-backend; then docker rm -f pathcomp-backend; else echo "pathcomp-backend image is not in the system"; fi
# Pathcomp-related
- if docker container ls | grep pathcomp-frontend; then docker rm -f pathcomp-frontend; else echo "pathcomp-frontend image is not in the system"; fi
- if docker container ls | grep pathcomp-backend; then docker rm -f pathcomp-backend; else echo "pathcomp-backend image is not in the system"; fi
# Service-related
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
# Service-related cleanup
if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
script:
- docker pull "cockroachdb/cockroach:latest-v22.2"
......@@ -76,87 +76,128 @@ unit_test service:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
# Context preparation
- docker volume create crdb
- >
docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080
--env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123
--volume "crdb:/cockroach/cockroach-data"
- |
docker volume create crdb
docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 \
--env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123 \
--volume "crdb:/cockroach/cockroach-data" \
cockroachdb/cockroach:latest-v22.2 start-single-node
- >
docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222
docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 \
nats:2.9 --http_port 8222 --user tfs --pass tfs123
- echo "Waiting for initialization..."
- while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
- docker logs crdb
- while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
- docker logs nats
- docker ps -a
- CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- echo $CRDB_ADDRESS
- NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- echo $NATS_ADDRESS
- >
docker run --name context -d -p 1010:1010
--env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
--env "MB_BACKEND=nats"
--env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222"
--network=teraflowbridge
echo "Waiting for initialization..."
while ! docker logs crdb 2>&1 | grep -q 'finished creating default user "tfs"'; do sleep 1; done
docker logs crdb
while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
docker logs nats
docker ps -a
CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
echo $CRDB_ADDRESS
NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
echo $NATS_ADDRESS
# Context Service Preparation
- |
docker run --name context -d -p 1010:1010 \
--env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" \
--env "MB_BACKEND=nats" \
--env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222" \
--network=teraflowbridge \
$CI_REGISTRY_IMAGE/context:$IMAGE_TAG
- CONTEXTSERVICE_SERVICE_HOST=$(docker inspect context --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- echo $CONTEXTSERVICE_SERVICE_HOST
CONTEXTSERVICE_SERVICE_HOST=$(docker inspect context --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
echo $CONTEXTSERVICE_SERVICE_HOST
# Device preparation
- >
docker run --name device -d -p 2020:2020
--env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
--network=teraflowbridge
- |
docker run --name device -d -p 2020:2020 \
--env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" \
--network=teraflowbridge \
$CI_REGISTRY_IMAGE/device:$IMAGE_TAG
- DEVICESERVICE_SERVICE_HOST=$(docker inspect device --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- echo $DEVICESERVICE_SERVICE_HOST
DEVICESERVICE_SERVICE_HOST=$(docker inspect device --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
echo $DEVICESERVICE_SERVICE_HOST
# PathComp preparation
- >
docker run --name pathcomp-backend -d -p 8081:8081
--network=teraflowbridge
- |
docker run --name pathcomp-backend -d -p 8081:8081 \
--network=teraflowbridge \
$CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG
- PATHCOMP_BACKEND_HOST=$(docker inspect pathcomp-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- echo $PATHCOMP_BACKEND_HOST
- sleep 1
- >
docker run --name pathcomp-frontend -d -p 10020:10020
--env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
--env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}"
--env "PATHCOMP_BACKEND_PORT=8081"
--network=teraflowbridge
PATHCOMP_BACKEND_HOST=$(docker inspect pathcomp-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
echo $PATHCOMP_BACKEND_HOST
sleep 1
docker run --name pathcomp-frontend -d -p 10020:10020 \
--env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" \
--env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}" \
--env "PATHCOMP_BACKEND_PORT=8081" \
--network=teraflowbridge \
$CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG
- sleep 1
- PATHCOMPSERVICE_SERVICE_HOST=$(docker inspect pathcomp-frontend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- echo $PATHCOMPSERVICE_SERVICE_HOST
sleep 1
PATHCOMPSERVICE_SERVICE_HOST=$(docker inspect pathcomp-frontend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
echo $PATHCOMPSERVICE_SERVICE_HOST
# Service preparation
- >
docker run --name $IMAGE_NAME -d -p 3030:3030
--env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
--env "DEVICESERVICE_SERVICE_HOST=${DEVICESERVICE_SERVICE_HOST}"
--env "PATHCOMPSERVICE_SERVICE_HOST=${PATHCOMPSERVICE_SERVICE_HOST}"
--volume "$PWD/src/$IMAGE_NAME/tests:/opt/results"
--network=teraflowbridge
- |
docker run --name $IMAGE_NAME -d -p 3030:3030 \
--env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" \
--env "DEVICESERVICE_SERVICE_HOST=${DEVICESERVICE_SERVICE_HOST}" \
--env "PATHCOMPSERVICE_SERVICE_HOST=${PATHCOMPSERVICE_SERVICE_HOST}" \
--volume "$PWD/src/$IMAGE_NAME/tests:/opt/results" \
--network=teraflowbridge \
$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
sleep 5
docker ps -a
docker logs context
docker logs device
docker logs pathcomp-frontend
docker logs pathcomp-backend
docker logs $IMAGE_NAME
# Check status before the tests
- sleep 5
- docker ps -a
- docker logs context
- docker logs device
- docker logs pathcomp-frontend
- docker logs pathcomp-backend
- docker logs $IMAGE_NAME
# Mock QKD Nodes Deployment
- |
echo "Starting stage: deploy_mock_nodes"
- pip install flask # Install Flask to ensure it is available
- |
for port in 11111 22222 33333; do
if lsof -i:$port >/dev/null 2>&1; then
echo "Freeing up port $port..."
fuser -k $port/tcp
fi
done
MOCK_NODES_DIR="$PWD/src/tests/tools/mock_qkd_nodes"
if [ -d "$MOCK_NODES_DIR" ]; then
cd "$MOCK_NODES_DIR" || exit
./start.sh &
MOCK_NODES_PID=$!
else
echo "Error: Mock QKD nodes directory '$MOCK_NODES_DIR' not found."
exit 1
fi
echo "Waiting for mock nodes to be up..."
RETRY_COUNT=0
MAX_RETRIES=15
while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
if curl -s http://127.0.0.1:11111 > /dev/null && \
curl -s http://127.0.0.1:22222 > /dev/null && \
curl -s http://127.0.0.1:33333 > /dev/null; then
echo "Mock nodes are up!"
break
else
echo "Mock nodes not ready, retrying in 5 seconds..."
RETRY_COUNT=$((RETRY_COUNT + 1))
sleep 5
fi
done
if [ $RETRY_COUNT -ge $MAX_RETRIES ]; then
echo "Error: Mock nodes failed to start after multiple attempts."
exit 1
fi
# Run the tests
- >
docker exec -i $IMAGE_NAME bash -c
- |
docker exec -i $IMAGE_NAME bash -c \
"coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
- docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
# Run QKD Bootstrap Test
- docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose service/tests/qkd/test_functional_bootstrap.py"
coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
after_script:
......
......@@ -32,6 +32,10 @@ RUN python3 -m pip install --upgrade pip
RUN python3 -m pip install --upgrade setuptools wheel
RUN python3 -m pip install --upgrade pip-tools
# Install Flask globally
RUN python3 -m pip install --upgrade pip
RUN python3 -m pip install flask
# Get common Python packages
# Note: this step enables sharing the previous Docker build steps among all the Python components
WORKDIR /var/teraflow
......
......@@ -10,68 +10,64 @@
"device_id": {"device_uuid": {"uuid": "QKD1"}}, "device_type": "qkd-node",
"device_operational_status": 0, "device_drivers": [12], "device_endpoints": [],
"device_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "<YOUR_MACHINE_IP>"}},
{"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "11111"}},
{"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
"scheme": "http"
}}}
]}
},
{
"device_id": {"device_uuid": {"uuid": "QKD2"}}, "device_type": "qkd-node",
"device_operational_status": 0, "device_drivers": [12], "device_endpoints": [],
"device_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "<YOUR_MACHINE_IP>"}},
{"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "22222"}},
{"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
"scheme": "http"
}}}
]}
},
{
{
"device_id": {"device_uuid": {"uuid": "QKD3"}}, "device_type": "qkd-node",
"device_operational_status": 0, "device_drivers": [12], "device_endpoints": [],
"device_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}},
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "<YOUR_MACHINE_IP>"}},
{"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "33333"}},
{"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {
"scheme": "http"
}}}
]}
}
],
"links": [
{
"link_id": {"link_uuid": {"uuid": "QKD1/10.0.2.10:1001==QKD2/10.0.2.10:2001"}},
{
"link_id": {"link_uuid": {"uuid": "QKD1/<YOUR_MACHINE_IP>:1001==QKD2/<YOUR_MACHINE_IP>:2001"}},
"link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "10.0.2.10:1001"}},
{"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2001"}}
{"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:1001"}},
{"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:2001"}}
]
},
{
"link_id": {"link_uuid": {"uuid": "QKD2/10.0.2.10:2001==QKD1/10.0.2.10:1001"}},
"link_id": {"link_uuid": {"uuid": "QKD2/<YOUR_MACHINE_IP>:2001==QKD1/<YOUR_MACHINE_IP>:1001"}},
"link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2001"}},
{"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "10.0.2.10:1001"}}
{"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:2001"}},
{"device_id": {"device_uuid": {"uuid": "QKD1"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:1001"}}
]
},
{
"link_id": {"link_uuid": {"uuid": "QKD2/10.0.2.10:2002==QKD3/10.0.2.10:3001"}},
{
"link_id": {"link_uuid": {"uuid": "QKD2/<YOUR_MACHINE_IP>:2002==QKD3/<YOUR_MACHINE_IP>:3001"}},
"link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2002"}},
{"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "10.0.2.10:3001"}}
{"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:2002"}},
{"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:3001"}}
]
},
{
"link_id": {"link_uuid": {"uuid": "QKD3/10.0.2.10:3001==QKD2/10.0.2.10:2002"}},
{
"link_id": {"link_uuid": {"uuid": "QKD3/<YOUR_MACHINE_IP>:3001==QKD2/<YOUR_MACHINE_IP>:2002"}},
"link_endpoint_ids": [
{"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "10.0.2.10:3001"}},
{"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "10.0.2.10:2002"}}
{"device_id": {"device_uuid": {"uuid": "QKD3"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:3001"}},
{"device_id": {"device_uuid": {"uuid": "QKD2"}}, "endpoint_uuid": {"uuid": "<YOUR_MACHINE_IP>:2002"}}
]
}
]
}
}
\ No newline at end of file
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging, os, time, json, socket, re
from common.Constants import DEFAULT_CONTEXT_NAME
from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty
from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
from common.tools.object_factory.Context import json_context_id
from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient
from tests.Fixtures import context_client, device_client # pylint: disable=unused-import
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
# Update the path to your QKD descriptor file
DESCRIPTOR_FILE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'descriptorQKD_links.json')
ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
def load_descriptor_with_runtime_ip(descriptor_file_path):
"""
Load the descriptor file and replace placeholder IP with the machine's IP address.
"""
with open(descriptor_file_path, 'r') as descriptor_file:
descriptor = descriptor_file.read()
# Get the current machine's IP address
try:
# Use socket to get the local IP address directly from the network interface
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
current_ip = s.getsockname()[0]
s.close()
except Exception as e:
raise Exception(f"Unable to get the IP address: {str(e)}")
# Replace all occurrences of <YOUR_MACHINE_IP> with the current IP
updated_descriptor = re.sub(r"<YOUR_MACHINE_IP>", current_ip, descriptor)
# Write updated descriptor back
with open(descriptor_file_path, 'w') as descriptor_file:
descriptor_file.write(updated_descriptor)
return json.loads(updated_descriptor)
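# Illustrative example (values are hypothetical): if the runtime IP resolves to 192.0.2.10,
# every "<YOUR_MACHINE_IP>" placeholder in descriptorQKD_links.json is rewritten, e.g. the
# endpoint uuid "<YOUR_MACHINE_IP>:1001" becomes "192.0.2.10:1001".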
def load_and_process_descriptor(context_client, device_client, descriptor_file_path):
"""
Function to load and process descriptor programmatically, similar to what WebUI does.
"""
print(f"Loading descriptor from file: {descriptor_file_path}")
try:
# Update the descriptor with the runtime IP address
descriptor = load_descriptor_with_runtime_ip(descriptor_file_path)
# Initialize DescriptorLoader with the updated descriptor file
descriptor_loader = DescriptorLoader(
descriptors_file=descriptor_file_path, context_client=context_client, device_client=device_client
)
# Process and validate the descriptor
print("Processing the descriptor...")
results = descriptor_loader.process()
print(f"Descriptor processing results: {results}")
print("Checking descriptor load results...")
check_descriptor_load_results(results, descriptor_loader)
print("Validating descriptor...")
descriptor_loader.validate()
print("Descriptor validated successfully.")
except Exception as e:
LOGGER.error(f"Failed to load and process descriptor: {e}")
raise e
def test_qkd_scenario_bootstrap(
context_client: ContextClient, # pylint: disable=redefined-outer-name
device_client: DeviceClient, # pylint: disable=redefined-outer-name
) -> None:
"""
This test validates that the QKD scenario is correctly bootstrapped.
"""
print("Starting QKD scenario bootstrap test...")
# Check if context_client and device_client are instantiated
if context_client is None:
print("Error: context_client is not instantiated!")
else:
print(f"context_client is instantiated: {context_client}")
if device_client is None:
print("Error: device_client is not instantiated!")
else:
print(f"device_client is instantiated: {device_client}")
# Validate empty scenario
print("Validating empty scenario...")
validate_empty_scenario(context_client)
# Load the descriptor
load_and_process_descriptor(context_client, device_client, DESCRIPTOR_FILE_PATH)
def test_qkd_devices_enabled(
context_client: ContextClient, # pylint: disable=redefined-outer-name
) -> None:
"""
This test validates that the QKD devices are enabled.
"""
print("Starting QKD devices enabled test...")
# Check if context_client is instantiated
if context_client is None:
print("Error: context_client is not instantiated!")
else:
print(f"context_client is instantiated: {context_client}")
DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
num_devices = -1
num_devices_enabled, num_retry = 0, 0
while (num_devices != num_devices_enabled) and (num_retry < 10):
print(f"Attempt {num_retry + 1}: Checking device status...")
time.sleep(1.0) # Add a delay to allow for device enablement
response = context_client.ListDevices(Empty())
num_devices = len(response.devices)
print(f"Total devices found: {num_devices}")
num_devices_enabled = 0
for device in response.devices:
if device.device_operational_status == DEVICE_OP_STATUS_ENABLED:
num_devices_enabled += 1
print(f"Devices enabled: {num_devices_enabled}/{num_devices}")
num_retry += 1
# Final check to ensure all devices are enabled
print(f"Final device status: {num_devices_enabled}/{num_devices} devices enabled.")
assert num_devices_enabled == num_devices
print("QKD devices enabled test completed.")
\ No newline at end of file
# Build, tag, and push the Docker image to the GitLab Docker registry
build qkd_end2end:
variables:
TEST_NAME: 'qkd_end2end'
stage: build
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker buildx build -t "${TEST_NAME}:latest" -f ./src/tests/${TEST_NAME}/Dockerfile .
- docker tag "${TEST_NAME}:latest" "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
- docker push "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
after_script:
- docker images --filter="dangling=true" --quiet | xargs -r docker rmi
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
- changes:
- src/common/**/*.py
- proto/*.proto
- src/tests/${TEST_NAME}/**/*.{py,in,sh,yml}
- src/tests/${TEST_NAME}/Dockerfile
- .gitlab-ci.yml
# Deploy TeraFlowSDN and Execute end-2-end test
end2end_test qkd_end2end:
variables:
TEST_NAME: 'qkd_end2end'
stage: end2end_test
# Needs to run after build stage
needs:
- build qkd_end2end
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
- docker rm -f crdb nats mock_qkd context device pathcomp-frontend pathcomp-backend service qkd_end2end
- docker network rm teraflowbridge
script:
# Create Docker network for inter-container communication
- docker network create -d bridge --subnet=172.254.253.0/24 --gateway=172.254.253.254 teraflowbridge
# Pull necessary images
- docker pull "$CI_REGISTRY_IMAGE/context:latest"
- docker pull "$CI_REGISTRY_IMAGE/device:latest"
- docker pull "$CI_REGISTRY_IMAGE/service:latest"
- docker pull "$CI_REGISTRY_IMAGE/pathcomp-frontend:latest"
- docker pull "$CI_REGISTRY_IMAGE/pathcomp-backend:latest"
- docker pull "$CI_REGISTRY_IMAGE/qkd_app:latest"
- docker pull "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest"
# Deploy CockroachDB (crdb) and NATS
- docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080 cockroachdb/cockroach:latest-v22.2 start-single-node --insecure
- docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222 nats:2.9 --http_port 8222 --user tfs --pass tfs123
# Wait for CockroachDB and NATS to initialize
- echo "Waiting for CockroachDB to be ready..."
- while ! docker logs crdb 2>&1 | grep -q 'CockroachDB node starting'; do sleep 1; done
- docker logs crdb
- echo "Waiting for NATS to be ready..."
- while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
- docker logs nats
# Start mock QKD nodes
- docker run --name mock_qkd -d --network=teraflowbridge -v "$PWD/src/tests/tools/mock_qkd_nodes:/app" python:3.9-slim bash -c "cd /app && ./start.sh"
# Wait for mock QKD nodes to initialize
- echo "Waiting for mock QKD nodes to be ready..."
- sleep 10
# Deploy TeraFlowSDN services
- CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
# Deploy Context Service
- docker run --name context -d -p 1010:1010 --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require" --env "MB_BACKEND=nats" --env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222" --network=teraflowbridge $CI_REGISTRY_IMAGE/context:latest
- CONTEXTSERVICE_SERVICE_HOST=$(docker inspect context --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
# Deploy Device Service
- docker run --name device -d -p 2020:2020 --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" --network=teraflowbridge $CI_REGISTRY_IMAGE/device:latest
- DEVICESERVICE_SERVICE_HOST=$(docker inspect device --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
# Deploy PathComp Services (frontend and backend)
- docker run --name pathcomp-backend -d -p 8081:8081 --network=teraflowbridge $CI_REGISTRY_IMAGE/pathcomp-backend:latest
- PATHCOMP_BACKEND_HOST=$(docker inspect pathcomp-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
- docker run --name pathcomp-frontend -d -p 10020:10020 --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" --env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}" --env "PATHCOMP_BACKEND_PORT=8081" --network=teraflowbridge $CI_REGISTRY_IMAGE/pathcomp-frontend:latest
- PATHCOMPSERVICE_SERVICE_HOST=$(docker inspect pathcomp-frontend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
# Deploy Service Component
- docker run --name service -d -p 3030:3030 --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}" --env "DEVICESERVICE_SERVICE_HOST=${DEVICESERVICE_SERVICE_HOST}" --env "PATHCOMPSERVICE_SERVICE_HOST=${PATHCOMPSERVICE_SERVICE_HOST}" --volume "$PWD/src/service/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/service:latest
# Wait for services to initialize
- sleep 10
# Run end-to-end tests for QKD application
- docker run --name ${TEST_NAME} -t --network=teraflowbridge --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" $CI_REGISTRY_IMAGE/${TEST_NAME}:latest
after_script:
# Dump logs for TeraFlowSDN components
- docker logs context
- docker logs device
- docker logs pathcomp-frontend
- docker logs pathcomp-backend
- docker logs service
# Dump logs for QKD mock nodes
- docker logs mock_qkd
# Clean up
- docker rm -f context device pathcomp-frontend pathcomp-backend service mock_qkd crdb nats
- docker network rm teraflowbridge
- docker volume prune --force
- docker image prune --force
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
- if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
artifacts:
when: always
reports:
junit: ./src/tests/${TEST_NAME}/report_*.xml
......@@ -13,13 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
cd "$(dirname "$0")"
# Ensure the local bin directory is in the PATH
export PATH=$PATH:/home/gitlab-runner/.local/bin
# Function to kill all background processes
killbg() {
for p in "${pids[@]}" ; do
kill "$p";
kill "$p" 2>/dev/null;
done
}
......@@ -28,15 +30,13 @@ pids=()
# Set FLASK_APP and run the Flask instances on different ports
export FLASK_APP=wsgi
flask run --host 0.0.0.0 --port 11111 &
pids+=($!)
flask run --host 0.0.0.0 --port 22222 &
pids+=($!)
flask run --host 0.0.0.0 --port 33333 &
pids+=($!)
# Starting Flask instances on different ports
for port in 11111 22222 33333; do
flask run --host 0.0.0.0 --port "$port" &
pids+=($!)
sleep 2 # To avoid conflicts during startup, giving each Flask instance time to initialize
done
# Wait for all background processes to finish
wait
......@@ -13,17 +13,20 @@
# limitations under the License.
import os
import socket
from flask import Flask, request
from YangValidator import YangValidator
app = Flask(__name__)
# Retrieve the IP address of the current machine
current_ip = socket.gethostbyname(socket.gethostname())
yang_validator = YangValidator('etsi-qkd-sdn-node', ['etsi-qkd-node-types'])
# Update IP address with the current machine's IP
nodes = {
'10.0.2.10:11111': {'node': {
f'{current_ip}:11111': {'node': {
'qkdn_id': '00000001-0000-0000-0000-000000000000',
},
'qkdn_capabilities': {
......@@ -31,7 +34,7 @@ nodes = {
'qkd_applications': {
'qkd_app': [
{
'app_id': '00000001-0001-0000-0000-000000000000',
'app_id': '00000001-0001-0000-0000-000000000000',
'client_app_id': [],
'app_statistics': {
'statistics': []
......@@ -54,8 +57,8 @@ nodes = {
{
'qkdi_id': '101',
'qkdi_att_point': {
'device':'10.0.2.10',
'port':'1001'
'device': current_ip,
'port': '1001'
},
'qkdi_capabilities': {
}
......@@ -63,13 +66,11 @@ nodes = {
]
},
'qkd_links': {
'qkd_link': [
]
'qkd_link': []
}
},
'10.0.2.10:22222': {'node': {
f'{current_ip}:22222': {'node': {
'qkdn_id': '00000002-0000-0000-0000-000000000000',
},
'qkdn_capabilities': {
......@@ -77,7 +78,7 @@ nodes = {
'qkd_applications': {
'qkd_app': [
{
'app_id': '00000002-0001-0000-0000-000000000000',
'app_id': '00000002-0001-0000-0000-000000000000',
'client_app_id': [],
'app_statistics': {
'statistics': []
......@@ -100,8 +101,8 @@ nodes = {
{
'qkdi_id': '201',
'qkdi_att_point': {
'device':'10.0.2.10',
'port':'2001'
'device': current_ip,
'port': '2001'
},
'qkdi_capabilities': {
}
......@@ -109,8 +110,8 @@ nodes = {
{
'qkdi_id': '202',
'qkdi_att_point': {
'device':'10.0.2.10',
'port':'2002'
'device': current_ip,
'port': '2002'
},
'qkdi_capabilities': {
}
......@@ -118,13 +119,11 @@ nodes = {
]
},
'qkd_links': {
'qkd_link': [
]
'qkd_link': []
}
},
'10.0.2.10:33333': {'node': {
f'{current_ip}:33333': {'node': {
'qkdn_id': '00000003-0000-0000-0000-000000000000',
},
'qkdn_capabilities': {
......@@ -132,7 +131,7 @@ nodes = {
'qkd_applications': {
'qkd_app': [
{
'app_id': '00000003-0001-0000-0000-000000000000',
'app_id': '00000003-0001-0000-0000-000000000000',
'client_app_id': [],
'app_statistics': {
'statistics': []
......@@ -155,8 +154,8 @@ nodes = {
{
'qkdi_id': '301',
'qkdi_att_point': {
'device':'10.0.2.10',
'port':'3001'
'device': current_ip,
'port': '3001'
},
'qkdi_capabilities': {
}
......@@ -164,24 +163,29 @@ nodes = {
]
},
'qkd_links': {
'qkd_link': [
]
'qkd_link': []
}
}
}
def get_side_effect(url):
steps = url.lstrip('https://').lstrip('http://').rstrip('/')
ip_port, _, _, header, *steps = steps.split('/')
parts = steps.split('/')
# Ensure there are enough parts to unpack
if len(parts) < 4:
raise ValueError(f"Expected at least 4 parts in the URL, got {len(parts)}: {steps}")
ip_port, _, _, header, *steps = parts
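# For example (illustration only), 'http://127.0.0.1:11111/restconf/data/etsi-qkd-sdn-node:qkd_node'
# yields parts = ['127.0.0.1:11111', 'restconf', 'data', 'etsi-qkd-sdn-node:qkd_node'],
# i.e. ip_port='127.0.0.1:11111', header='etsi-qkd-sdn-node:qkd_node' and steps=[].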
header_splitted = header.split(':')
module = header_splitted[0]
assert(module == 'etsi-qkd-sdn-node')
if ip_port.startswith('127.0.0.1'):
ip_port = ip_port.replace('127.0.0.1', current_ip)
tree = {'qkd_node': nodes[ip_port]['node'].copy()}
if len(header_splitted) == 1 or not header_splitted[1]:
......@@ -197,35 +201,18 @@ def get_side_effect(url):
if not steps:
return tree, tree
endpoint, *steps = steps
value = nodes[ip_port][endpoint]
if not steps:
return_value = {endpoint:value}
return_value = {endpoint: value}
tree['qkd_node'].update(return_value)
return return_value, tree
'''
element, *steps = steps
container, key = element.split('=')
# value = value[container][key]
if not steps:
return_value['qkd_node'][endpoint] = [value]
return return_value
'''
raise Exception('Url too long')
def edit(from_dict, to_dict, create):
for key, value in from_dict.items():
if isinstance(value, dict):
......@@ -237,11 +224,15 @@ def edit(from_dict, to_dict, create):
else:
to_dict[key] = value
def edit_side_effect(url, json, create):
steps = url.lstrip('https://').lstrip('http://').rstrip('/')
ip_port, _, _, header, *steps = steps.split('/')
parts = steps.split('/')
# Ensure there are enough parts to unpack
if len(parts) < 4:
raise ValueError(f"Expected at least 4 parts in the URL, got {len(parts)}: {steps}")
ip_port, _, _, header, *steps = parts
module, root = header.split(':')
......@@ -249,7 +240,7 @@ def edit_side_effect(url, json, create):
assert(root == 'qkd_node')
if not steps:
edit(json, nodes[ip_port]['node'])
edit(json, nodes[ip_port]['node'], create)
return
endpoint, *steps = steps
......@@ -258,36 +249,19 @@ def edit_side_effect(url, json, create):
edit(json[endpoint], nodes[ip_port][endpoint], create)
return
'''
element, *steps = steps
container, key = element.split('=')
if not steps:
if key not in nodes[ip_port][endpoint][container] and create:
nodes[ip_port][endpoint][container][key] = {}
edit(json, nodes[ip_port][endpoint][container][key], create)
return 0
'''
raise Exception('Url too long')
@app.get('/', defaults={'path': ''})
@app.get("/<string:path>")
@app.get('/<path:path>')
def get(path):
msg, msg_validate = get_side_effect(request.base_url)
print(msg_validate)
yang_validator.parse_to_dict(msg_validate)
return msg
try:
msg, msg_validate = get_side_effect(request.base_url)
print(msg_validate)
yang_validator.parse_to_dict(msg_validate)
return msg
except ValueError as e:
return {'error': str(e)}, 400
@app.post('/', defaults={'path': ''})
@app.post("/<string:path>")
......@@ -301,8 +275,6 @@ def post(path):
reason = str(e)
success = False
return {'success': success, 'reason': reason}
@app.route('/', defaults={'path': ''}, methods=['PUT', 'PATCH'])
@app.route("/<string:path>", methods=['PUT', 'PATCH'])
......@@ -317,10 +289,6 @@ def patch(path):
success = False
return {'success': success, 'reason': reason}
# import json
# from mock import requests
# import pyangbind.lib.pybindJSON as enc
......@@ -335,7 +303,6 @@ def patch(path):
# var = dec.load_json(z, None, None, obj=module)
# print(enc.dumps(var))
# Reset module variable because it is already filled
# module = etsi_qkd_sdn_node()
......@@ -345,7 +312,6 @@ def patch(path):
# var = dec.load_json(z, None, None, obj=node)
# print(enc.dumps(var))
# # Get all apps
# apps = node.qkd_applications
# z = requests.get(url + 'qkd_node/qkd_applications').json()
......