diff --git a/.gitignore b/.gitignore index 7e3b0cd6a26b755aeac4422f530c331d25a0cc43..0a116f850780386a9fe1010b22164f4c7dbf8228 100644 --- a/.gitignore +++ b/.gitignore @@ -162,6 +162,7 @@ cython_debug/ # TeraFlowSDN-generated files tfs_runtime_env_vars.sh +tfs_runtime_env_vars*.sh tfs_bchain_runtime_env_vars.sh delete_local_deployment.sh local_docker_deployment.sh diff --git a/deploy.sh b/deploy.sh index add41fa139a0127cb26d652f5b47decfe8658ad0..fa1dc2b3623255d2dac82cc1d982c607b9b6af5b 100755 --- a/deploy.sh +++ b/deploy.sh @@ -36,9 +36,13 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} # If not already set, set additional manifest files to be applied after the deployment export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""} -# If not already set, set the neew Grafana admin password +# If not already set, set the new Grafana admin password export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"} +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} + ######################################################################################################################## # Automated steps start here ######################################################################################################################## @@ -67,73 +71,75 @@ echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT for COMPONENT in $TFS_COMPONENTS; do echo "Processing '$COMPONENT' component..." - echo " Building Docker image..." 
- BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" - - if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then - docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" - elif [ "$COMPONENT" == "pathcomp" ]; then - BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" - docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG" - - BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" - docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG" - # next command is redundant, but helpful to keep cache updated between rebuilds - IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" - docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" - elif [ "$COMPONENT" == "dlt" ]; then - BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" - docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" - - BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log" - docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG" - else - docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" - fi + if [ "$TFS_SKIP_BUILD" != "YES" ]; then + echo " Building Docker image..." + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" + + if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then + docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" + elif [ "$COMPONENT" == "pathcomp" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" + docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . 
> "$BUILD_LOG" + + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" + docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG" + # next command is redundant, but helpful to keep cache updated between rebuilds + IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" + docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" + docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" - if [ -n "$TFS_REGISTRY_IMAGE" ]; then - echo " Pushing Docker image to '$TFS_REGISTRY_IMAGE'..." + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log" + docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG" + else + docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" + fi - if [ "$COMPONENT" == "pathcomp" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + if [ -n "$TFS_REGISTRY_IMAGE" ]; then + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGE'..." 
- TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" - docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + if [ "$COMPONENT" == "pathcomp" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" + docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" - docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" - elif [ "$COMPONENT" == "dlt" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" + docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" - docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" + docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > 
"$TAG_LOG" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" - docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" - else - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" + docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + else + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" - docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" + docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi fi fi diff --git a/src/tests/netx22-p4/README.md b/hackfest/p4/README.md similarity index 84% rename from src/tests/netx22-p4/README.md rename to hackfest/p4/README.md index ddc759f43477fa69cdfd94cf979f9e3a803e77de..344b88c0826a4d44ded9c8369a5009e73ed211b4 100644 --- a/src/tests/netx22-p4/README.md +++ b/hackfest/p4/README.md @@ -4,12 +4,12 @@ This functional test shows the P4 driver with a basic connectivity test between ## Functional test folder -This functional test can be found in folder `src/tests/netx22-p4/`. 
+This functional test can be found in folder `hackfest/p4` ## P4 source and Mininet topology -This test is designed to operate with a mininet deployment that contains 2 hosts and a BMv2 switch, such a topology can be found in the 'src/tests/netx22-p4/mininet' folder. -Additionally the P4 source code, along with its compiled artifacts are present in the 'src/tests/netx22-p4/mininet' folder. +This test is designed to operate with a mininet deployment that contains 2 hosts and a BMv2 switch, such a topology can be found in the 'hackfest/p4/mininet' folder. +Additionally the P4 source code, along with its compiled artifacts are present in the 'hackfest/p4/p4' folder. ## Deployment and Dependencies @@ -50,7 +50,7 @@ start-simple: NGSDN_TOPO_PY := topo-simple.py start-simple: _start ``` -And copy the topology file from ~/tfs-ctrl/src/tests/netx22-p4/mininet/topo-simple.py to the ~/ngsdn-tutorial/mininet/ directory. +And copy the topology file from ~/tfs-ctrl/hackfest/p4/mininet/topo-simple.py to the ~/ngsdn-tutorial/mininet/ directory. ## Test Execution @@ -70,28 +70,28 @@ client ping server In another terminal cd to the teraflow directory and run the following ``` -src/tests/netx22-p4/setup.sh +hackfest/p4/setup.sh ``` This will copy the p4 artifacts to the device pod. Then you can bootstrap the device to the Teraflow Controller ``` -src/tests/netx22-p4/run_test_01_bootstrap.sh +hackfest/p4/run_test_01_bootstrap.sh ``` Install the required rules to the p4 switch ``` -src/tests/netx22-p4/run_test_02_create_service.sh +hackfest/p4/run_test_02_create_service.sh ``` You should now check the mininet terminal. The two hosts should be pinging each other as intended. You can remove the rules from the p4 switch ``` -src/tests/netx22-p4/run_test_03_delete_service.sh +hackfest/p4/run_test_03_delete_service.sh ``` The two hosts on the mininet terminal, should stop pinging. 
And remove the device from the Teraflow Controller ``` -src/tests/netx22-p4/run_test_04_cleanup.sh +hackfest/p4/run_test_04_cleanup.sh ``` diff --git a/src/compute/tests/mock_osm/__init__.py b/hackfest/p4/__init__.py similarity index 100% rename from src/compute/tests/mock_osm/__init__.py rename to hackfest/p4/__init__.py diff --git a/src/tests/netx22-p4/deploy_specs.sh b/hackfest/p4/deploy_specs.sh similarity index 100% rename from src/tests/netx22-p4/deploy_specs.sh rename to hackfest/p4/deploy_specs.sh diff --git a/src/tests/netx22-p4/mininet/topo-simple.py b/hackfest/p4/mininet/topo-simple.py similarity index 100% rename from src/tests/netx22-p4/mininet/topo-simple.py rename to hackfest/p4/mininet/topo-simple.py diff --git a/src/tests/netx22-p4/p4/bmv2.json b/hackfest/p4/p4/bmv2.json similarity index 100% rename from src/tests/netx22-p4/p4/bmv2.json rename to hackfest/p4/p4/bmv2.json diff --git a/src/tests/netx22-p4/p4/main.p4 b/hackfest/p4/p4/main.p4 similarity index 100% rename from src/tests/netx22-p4/p4/main.p4 rename to hackfest/p4/p4/main.p4 diff --git a/src/tests/netx22-p4/p4/p4info.txt b/hackfest/p4/p4/p4info.txt similarity index 100% rename from src/tests/netx22-p4/p4/p4info.txt rename to hackfest/p4/p4/p4info.txt diff --git a/src/tests/netx22-p4/run_test_01_bootstrap.sh b/hackfest/p4/run_test_01_bootstrap.sh similarity index 90% rename from src/tests/netx22-p4/run_test_01_bootstrap.sh rename to hackfest/p4/run_test_01_bootstrap.sh index a3aeaa2b624bf28a06d379247e97211915522746..42e647be17b0e1731a8c69fb68c2cb414fdb542c 100755 --- a/src/tests/netx22-p4/run_test_01_bootstrap.sh +++ b/hackfest/p4/run_test_01_bootstrap.sh @@ -18,4 +18,5 @@ # - tfs_runtime_env_vars.sh source tfs_runtime_env_vars.sh -python -m pytest --verbose src/tests/netx22-p4/tests/test_functional_bootstrap.py +python -m pytest --verbose hackfest/p4/tests/test_functional_bootstrap.py + diff --git a/src/tests/netx22-p4/run_test_02_create_service.sh 
b/hackfest/p4/run_test_02_create_service.sh similarity index 88% rename from src/tests/netx22-p4/run_test_02_create_service.sh rename to hackfest/p4/run_test_02_create_service.sh index eb2b2d1ab2861bbcfc1d1fcd091ffc784945ceae..8fb9038d8abaff5abd36b18a316af267186f7fcc 100755 --- a/src/tests/netx22-p4/run_test_02_create_service.sh +++ b/hackfest/p4/run_test_02_create_service.sh @@ -14,4 +14,4 @@ # limitations under the License. source tfs_runtime_env_vars.sh -python -m pytest --verbose src/tests/netx22-p4/tests/test_functional_create_service.py +python -m pytest --verbose hackfest/p4/tests/test_functional_create_service.py diff --git a/src/tests/netx22-p4/run_test_03_delete_service.sh b/hackfest/p4/run_test_03_delete_service.sh similarity index 88% rename from src/tests/netx22-p4/run_test_03_delete_service.sh rename to hackfest/p4/run_test_03_delete_service.sh index 918073c84ab0a1b42c832fb2bf048eabb0aeabc2..96f79c0714b65f7ebfd125b7d429e23d7213549d 100755 --- a/src/tests/netx22-p4/run_test_03_delete_service.sh +++ b/hackfest/p4/run_test_03_delete_service.sh @@ -14,4 +14,4 @@ # limitations under the License. source tfs_runtime_env_vars.sh -python -m pytest --verbose src/tests/netx22-p4/tests/test_functional_delete_service.py +python -m pytest --verbose hackfest/p4/tests/test_functional_delete_service.py diff --git a/src/tests/netx22-p4/run_test_04_cleanup.sh b/hackfest/p4/run_test_04_cleanup.sh similarity index 89% rename from src/tests/netx22-p4/run_test_04_cleanup.sh rename to hackfest/p4/run_test_04_cleanup.sh index 9e70d02ad09fcdd5c3f7a77b3f0361f366c7f989..5cb265f6f647516e0ec4da9484bff5ec7d6e488a 100755 --- a/src/tests/netx22-p4/run_test_04_cleanup.sh +++ b/hackfest/p4/run_test_04_cleanup.sh @@ -14,4 +14,4 @@ # limitations under the License. 
source tfs_runtime_env_vars.sh -python -m pytest --verbose src/tests/netx22-p4/tests/test_functional_cleanup.py +python -m pytest --verbose hackfest/p4/tests/test_functional_cleanup.py diff --git a/src/tests/netx22-p4/setup.sh b/hackfest/p4/setup.sh similarity index 50% rename from src/tests/netx22-p4/setup.sh rename to hackfest/p4/setup.sh index 07fe22e6aea2341c50462010b4bfb55c4a657a47..195327a03fedafdc64a2d0dc34577766eda72a4f 100755 --- a/src/tests/netx22-p4/setup.sh +++ b/hackfest/p4/setup.sh @@ -4,5 +4,5 @@ export POD_NAME=$(kubectl get pods -n=tfs | grep device | awk '{print $1}') kubectl exec ${POD_NAME} -n=tfs -- mkdir /root/p4 -kubectl cp src/tests/netx22-p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4 -kubectl cp src/tests/netx22-p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4 +kubectl cp hackfest/p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4 +kubectl cp hackfest/p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4 diff --git a/src/tests/netx22-p4/tests/.gitignore b/hackfest/p4/tests/.gitignore similarity index 100% rename from src/tests/netx22-p4/tests/.gitignore rename to hackfest/p4/tests/.gitignore diff --git a/src/tests/netx22-p4/tests/BuildDescriptors.py b/hackfest/p4/tests/BuildDescriptors.py similarity index 100% rename from src/tests/netx22-p4/tests/BuildDescriptors.py rename to hackfest/p4/tests/BuildDescriptors.py diff --git a/src/tests/netx22-p4/tests/LoadDescriptors.py b/hackfest/p4/tests/LoadDescriptors.py similarity index 100% rename from src/tests/netx22-p4/tests/LoadDescriptors.py rename to hackfest/p4/tests/LoadDescriptors.py diff --git a/src/tests/netx22-p4/tests/Objects.py b/hackfest/p4/tests/Objects.py similarity index 86% rename from src/tests/netx22-p4/tests/Objects.py rename to hackfest/p4/tests/Objects.py index 09b3aced843a198b7c963a34492a4fe2379c9123..c8b172244d714cd699ccc587e54c3751485a9a2e 100644 --- a/src/tests/netx22-p4/tests/Objects.py +++ b/hackfest/p4/tests/Objects.py @@ -1,4 +1,5 @@ # Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) 
+# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -42,6 +43,8 @@ PACKET_PORT_SAMPLE_TYPES = [ KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED, ] +# ----- Device Credentials and Settings -------------------------------------------------------------------------------- + # ----- Devices -------------------------------------------------------------------------------------------------------- @@ -54,7 +57,7 @@ DEVICE_SW1 = json_device_p4_disabled(DEVICE_SW1_UUID) DEVICE_SW1_DPID = 1 DEVICE_SW1_NAME = DEVICE_SW1_UUID -DEVICE_SW1_IP_ADDR = '10.0.2.10' +DEVICE_SW1_IP_ADDR = 'localhost' DEVICE_SW1_PORT = '50001' DEVICE_SW1_VENDOR = 'Open Networking Foundation' DEVICE_SW1_HW_VER = 'BMv2 simple_switch' @@ -78,9 +81,38 @@ DEVICE_SW1_CONNECT_RULES = json_device_connect_rules( } ) +DEVICE_SW2_UUID = 'SW2' +DEVICE_SW2_TIMEOUT = 60 +DEVICE_SW2_ID = json_device_id(DEVICE_SW2_UUID) +DEVICE_SW2 = json_device_p4_disabled(DEVICE_SW2_UUID) -################################## TABLE ENTRIES ################################## +DEVICE_SW2_DPID = 1 +DEVICE_SW2_NAME = DEVICE_SW2_UUID +DEVICE_SW2_IP_ADDR = 'localhost' +DEVICE_SW2_PORT = '50002' +DEVICE_SW2_VENDOR = 'Open Networking Foundation' +DEVICE_SW2_HW_VER = 'BMv2 simple_switch' +DEVICE_SW2_SW_VER = 'Stratum' +DEVICE_SW2_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW2_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW2_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW2_IP_ADDR, + DEVICE_SW2_PORT, + { + 'id': DEVICE_SW2_DPID, + 'name': DEVICE_SW2_NAME, + 'vendor': DEVICE_SW2_VENDOR, + 'hw_ver': DEVICE_SW2_HW_VER, + 'sw_ver': DEVICE_SW2_SW_VER, + 'timeout': DEVICE_SW2_TIMEOUT, + 'p4bin': DEVICE_SW2_BIN_PATH, + 'p4info': DEVICE_SW2_INFO_PATH + } +) + +################################## TABLE ENTRIES ################################## DEVICE_SW1_CONFIG_TABLE_ENTRIES = [ json_config_rule_set( @@ -123,6 +155,8 @@ 
DEVICE_SW1_CONFIG_TABLE_ENTRIES = [ ) ] +DEVICE_SW2_CONFIG_TABLE_ENTRIES = DEVICE_SW1_CONFIG_TABLE_ENTRIES + """ DEVICE_SW1_CONFIG_TABLE_ENTRIES = [ @@ -171,7 +205,6 @@ DEVICE_SW1_CONFIG_TABLE_ENTRIES = [ ################################## TABLE DECONF ################################## - DEVICE_SW1_DECONF_TABLE_ENTRIES = [ json_config_rule_delete( 'table', @@ -213,6 +246,7 @@ DEVICE_SW1_DECONF_TABLE_ENTRIES = [ ) ] +DEVICE_SW2_DECONF_TABLE_ENTRIES = DEVICE_SW1_DECONF_TABLE_ENTRIES """ @@ -271,6 +305,7 @@ TOPOLOGIES = [TOPOLOGY] DEVICES = [ (DEVICE_SW1, DEVICE_SW1_CONNECT_RULES, DEVICE_SW1_CONFIG_TABLE_ENTRIES, DEVICE_SW1_DECONF_TABLE_ENTRIES), + (DEVICE_SW2, DEVICE_SW2_CONNECT_RULES, DEVICE_SW2_CONFIG_TABLE_ENTRIES, DEVICE_SW2_DECONF_TABLE_ENTRIES), ] LINKS = [] diff --git a/src/tests/netx22-p4/__init__.py b/hackfest/p4/tests/__init__.py similarity index 100% rename from src/tests/netx22-p4/__init__.py rename to hackfest/p4/tests/__init__.py diff --git a/src/tests/netx22-p4/tests/test_functional_bootstrap.py b/hackfest/p4/tests/test_functional_bootstrap.py similarity index 100% rename from src/tests/netx22-p4/tests/test_functional_bootstrap.py rename to hackfest/p4/tests/test_functional_bootstrap.py diff --git a/src/tests/netx22-p4/tests/test_functional_cleanup.py b/hackfest/p4/tests/test_functional_cleanup.py similarity index 97% rename from src/tests/netx22-p4/tests/test_functional_cleanup.py rename to hackfest/p4/tests/test_functional_cleanup.py index 32f716f1c2287b11bae3610022d64659d82ba73d..ccbcb9843a03bbf095743af0753da3fe8af3bfce 100644 --- a/src/tests/netx22-p4/tests/test_functional_cleanup.py +++ b/hackfest/p4/tests/test_functional_cleanup.py @@ -54,8 +54,8 @@ def test_scenario_cleanup( device_client.DeleteDevice(DeviceId(**device_id)) #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid))) - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 + response = context_client.ListDevices(Empty()) + 
assert len(response.devices) == 0 # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------ for topology in TOPOLOGIES: diff --git a/src/tests/netx22-p4/tests/test_functional_create_service.py b/hackfest/p4/tests/test_functional_create_service.py similarity index 100% rename from src/tests/netx22-p4/tests/test_functional_create_service.py rename to hackfest/p4/tests/test_functional_create_service.py diff --git a/src/tests/netx22-p4/tests/test_functional_delete_service.py b/hackfest/p4/tests/test_functional_delete_service.py similarity index 100% rename from src/tests/netx22-p4/tests/test_functional_delete_service.py rename to hackfest/p4/tests/test_functional_delete_service.py diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 04da586dfeb25a01a6f5267aa31441498ce4f2cc..5c07971a328a389473899375f2d2aad9031f473e 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -34,10 +34,10 @@ spec: - containerPort: 6379 resources: requests: - cpu: 250m - memory: 512Mi + cpu: 100m + memory: 128Mi limits: - cpu: 700m + cpu: 500m memory: 1024Mi - name: server image: registry.gitlab.com/teraflow-h2020/controller/context:latest @@ -64,11 +64,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:1010"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index 46c7557d9178d1bb2bc36eda13a088606f56cede..d2595ab1915554d7ebfd786b8f39b531e40da490 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 2020 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:2020"] @@ -43,11 +43,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:2020"] resources: 
requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml index 5ef6eae7de6cb7c839b0cb17e65c8b3f045c1d66..d2ad4f40444faa6b9de7724f8b3df077bb7910b2 100644 --- a/manifests/dltservice.yaml +++ b/manifests/dltservice.yaml @@ -35,6 +35,11 @@ spec: env: - name: LOG_LEVEL value: "INFO" + ## for debug purposes + #- name: DLT_GATEWAY_HOST + # value: "mock-blockchain.tfs-bchain.svc.cluster.local" + #- name: DLT_GATEWAY_PORT + # value: "50051" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:8080"] @@ -43,14 +48,16 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:8080"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi - name: gateway image: registry.gitlab.com/teraflow-h2020/controller/dlt-gateway:latest imagePullPolicy: Always + ports: + - containerPort: 50051 #readinessProbe: # httpGet: # path: /health @@ -65,7 +72,7 @@ spec: # timeoutSeconds: 5 resources: requests: - cpu: 250m + cpu: 200m memory: 512Mi limits: cpu: 700m diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml index ca30da0101659f801440af343e42851146d17bda..3ef3ffba301cadf26beaa34787dcd816e87c65a0 100644 --- a/manifests/interdomainservice.yaml +++ b/manifests/interdomainservice.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 10010 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10010"] @@ -43,11 +43,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:10010"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service diff --git a/manifests/mock_blockchain.yaml b/manifests/mock_blockchain.yaml index 
b383d7db42be9eb3c9dc7758c230f5250eb43db1..bf9abac703b263ad6a843f0d70848dde94a4ab97 100644 --- a/manifests/mock_blockchain.yaml +++ b/manifests/mock_blockchain.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 50051 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:50051"] @@ -43,7 +43,7 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:50051"] resources: requests: - cpu: 250m + cpu: 100m memory: 512Mi limits: cpu: 700m diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index d5939cb154443139be88d8e0ac23c281a3b18c4d..92e24ac42b7b86be6056709abd9a2cd6fc16598b 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -43,11 +43,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:10020"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi - name: backend image: registry.gitlab.com/teraflow-h2020/controller/pathcomp-backend:latest imagePullPolicy: Always @@ -65,8 +65,8 @@ spec: # timeoutSeconds: 5 resources: requests: - cpu: 250m - memory: 512Mi + cpu: 100m + memory: 256Mi limits: cpu: 700m memory: 1024Mi diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index efe43fe229a7f7ba862b10a04d44c6e9de06b5fb..a5568a5112eb08a02df2178ba45db57b57c19cc3 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 3030 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:3030"] @@ -43,11 +43,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:3030"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index 
eeed3776c10958751b74fc81b52ab79806153b18..b20669b0c03cc22857abd1534e19780025b9066a 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 4040 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:4040"] @@ -43,11 +43,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:4040"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index cac64a816075f1a0ad91a21c519463aa5cd8f973..7f70e837c4b6b979477a3a02db6e744b41387d73 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -38,7 +38,7 @@ spec: - containerPort: 8004 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" - name: WEBUISERVICE_SERVICE_BASEURL_HTTP value: "/webui/" readinessProbe: @@ -55,7 +55,7 @@ spec: timeoutSeconds: 1 resources: requests: - cpu: 250m + cpu: 100m memory: 512Mi limits: cpu: 700m diff --git a/my_deploy.sh b/my_deploy.sh index 5eb8071f4ebc31508e12c7773e5a84fc3ef080b3..030464fa34eb2c117ea2cc5276e1d59aa4cfed61 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -18,5 +18,9 @@ export TFS_K8S_NAMESPACE="tfs" # Set additional manifest files to be applied after the deployment export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" -# Set the neew Grafana admin password +# Set the new Grafana admin password export TFS_GRAFANA_PASSWORD="admin123+" + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. 
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} diff --git a/nfvsdn22 b/nfvsdn22 new file mode 120000 index 0000000000000000000000000000000000000000..ac93a84be42e09c11106c5e0836bb4e51cc1fa1a --- /dev/null +++ b/nfvsdn22 @@ -0,0 +1 @@ +src/tests/nfvsdn22/ \ No newline at end of file diff --git a/proto/context.proto b/proto/context.proto index f5dec30796a8426f512947d369b8db5f5889471a..5b49bd28866af919332ab7188bbf66203e8b766d 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -171,6 +171,7 @@ enum DeviceDriverEnum { DEVICEDRIVER_P4 = 3; DEVICEDRIVER_IETF_NETWORK_TOPOLOGY = 4; DEVICEDRIVER_ONF_TR_352 = 5; + DEVICEDRIVER_XR = 6; } enum DeviceOperationalStatusEnum { diff --git a/proto/dlt_connector.proto b/proto/dlt_connector.proto index c8cbeb663fafb3c133092e9c49c2ece3f59d75ae..1038d6ccd40c8393313fc7f8dbfd48b1e0cf1739 100644 --- a/proto/dlt_connector.proto +++ b/proto/dlt_connector.proto @@ -18,14 +18,37 @@ package dlt; import "context.proto"; service DltConnectorService { - rpc RecordAll (context.Empty ) returns (context.Empty) {} + rpc RecordAll (context.TopologyId) returns (context.Empty) {} - rpc RecordAllDevices (context.Empty ) returns (context.Empty) {} - rpc RecordDevice (context.DeviceId ) returns (context.Empty) {} + rpc RecordAllDevices (context.TopologyId) returns (context.Empty) {} + rpc RecordDevice (DltDeviceId ) returns (context.Empty) {} - rpc RecordAllServices(context.Empty ) returns (context.Empty) {} - rpc RecordService (context.ServiceId) returns (context.Empty) {} + rpc RecordAllLinks (context.TopologyId) returns (context.Empty) {} + rpc RecordLink (DltLinkId ) returns (context.Empty) {} - rpc RecordAllSlices (context.Empty ) returns (context.Empty) {} - rpc RecordSlice (context.SliceId ) returns (context.Empty) {} + rpc RecordAllServices(context.TopologyId) returns (context.Empty) {} + rpc RecordService (DltServiceId ) returns (context.Empty) {} + + rpc RecordAllSlices (context.TopologyId) returns (context.Empty) {} + rpc RecordSlice 
(DltSliceId ) returns (context.Empty) {} +} + +message DltDeviceId { + context.TopologyId topology_id = 1; + context.DeviceId device_id = 2; +} + +message DltLinkId { + context.TopologyId topology_id = 1; + context.LinkId link_id = 2; +} + +message DltServiceId { + context.TopologyId topology_id = 1; + context.ServiceId service_id = 2; +} + +message DltSliceId { + context.TopologyId topology_id = 1; + context.SliceId slice_id = 2; } diff --git a/proto/kpi_sample_types.proto b/proto/kpi_sample_types.proto index 3494d984970a5d3c4eb312258403e8f1fedcf3c6..4419a8df4a22047d8708c5cf2e2c3657148b5eeb 100644 --- a/proto/kpi_sample_types.proto +++ b/proto/kpi_sample_types.proto @@ -23,8 +23,8 @@ enum KpiSampleType { KPISAMPLETYPE_BYTES_TRANSMITTED = 201; KPISAMPLETYPE_BYTES_RECEIVED = 202; KPISAMPLETYPE_BYTES_DROPPED = 203; - KPISAMPLETYPE_ML_CONFIDENCE = 401; //. can be used by both optical and L3 without any issue - KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; //. can be used by both optical and L3 without any issue + KPISAMPLETYPE_ML_CONFIDENCE = 401; //. can be used by both optical and L3 without any issue + KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; //. 
can be used by both optical and L3 without any issue KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS = 602; KPISAMPLETYPE_L3_UNIQUE_ATTACKERS = 603; diff --git a/proto/monitoring.proto b/proto/monitoring.proto index 027dcb0227c9b49cf091213bd42f101374eb3bc2..f9c408c96ced121f35cc1116bf64d013e7320e6a 100644 --- a/proto/monitoring.proto +++ b/proto/monitoring.proto @@ -36,7 +36,7 @@ service MonitoringService { rpc GetAlarmResponseStream(AlarmSubscription ) returns (stream AlarmResponse) {} // Not Stable not final rpc DeleteAlarm (AlarmID ) returns (context.Empty ) {} // Stable and final rpc GetStreamKpi (KpiId ) returns (stream Kpi ) {} // Stable not final - rpc GetInstantKpi (KpiId ) returns (Kpi ) {} // Stable not final + rpc GetInstantKpi (KpiId ) returns (Kpi ) {} // Stable not final } message KpiDescriptor { @@ -59,7 +59,7 @@ message MonitorKpiRequest { } message KpiQuery { - repeated KpiId kpi_ids = 1; + repeated KpiId kpi_ids = 1; float monitoring_window_s = 2; uint32 last_n_samples = 3; // used when you want something like "get the last N many samples context.Timestamp start_timestamp = 4; // used when you want something like "get the samples since X date/time" diff --git a/proto/policy.proto b/proto/policy.proto index d8e51caea2231e21b982771e7a4d63f3db93471c..9d0c34a3304f68c47a19ac56d0e96b10936bee7b 100644 --- a/proto/policy.proto +++ b/proto/policy.proto @@ -109,5 +109,5 @@ message PolicyRuleDeviceList { // A list of policy rules message PolicyRuleList { - repeated PolicyRuleId policyRules = 1; + repeated PolicyRule policyRules = 1; } diff --git a/scripts/show_logs_dlt_connector.sh b/scripts/show_logs_dlt_connector.sh new file mode 100755 index 0000000000000000000000000000000000000000..db4c388c20399007ba10b357a5e153df4a86c519 --- /dev/null +++ b/scripts/show_logs_dlt_connector.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/dltservice -c connector diff --git a/scripts/show_logs_dlt_gateway.sh b/scripts/show_logs_dlt_gateway.sh new file mode 100755 index 0000000000000000000000000000000000000000..c00be2df16cb69b3ace501a854d1248a72abbf3e --- /dev/null +++ b/scripts/show_logs_dlt_gateway.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/dltservice -c gateway diff --git a/scripts/show_logs_monitoring.sh b/scripts/show_logs_monitoring.sh index 520a9da1c652553eb90acd083caf5724275f4efe..faa825fdfae2bb85f0790a877b75d533ff5aa0d5 100755 --- a/scripts/show_logs_monitoring.sh +++ b/scripts/show_logs_monitoring.sh @@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} # Automated steps start here ######################################################################################################################## -kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringserver +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice server diff --git a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java index 
816500a57d8431b36f54a95ee714b59b5f984c62..445dea540b57717f1005d8b37269777f7e2147ee 100644 --- a/src/automation/src/main/java/eu/teraflow/automation/Serializer.java +++ b/src/automation/src/main/java/eu/teraflow/automation/Serializer.java @@ -851,6 +851,8 @@ public class Serializer { return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY; case ONF_TR_352: return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352; + case XR: + return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR; case UNDEFINED: default: return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_UNDEFINED; @@ -870,6 +872,8 @@ public class Serializer { return DeviceDriverEnum.IETF_NETWORK_TOPOLOGY; case DEVICEDRIVER_ONF_TR_352: return DeviceDriverEnum.ONF_TR_352; + case DEVICEDRIVER_XR: + return DeviceDriverEnum.XR; case DEVICEDRIVER_UNDEFINED: case UNRECOGNIZED: default: diff --git a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java index 8fc767ac2e5d7fed70f0375fcf8c820e30fbb149..fc0521927dfc695229016ad42bc612b27304d6eb 100644 --- a/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java +++ b/src/automation/src/main/java/eu/teraflow/automation/context/model/DeviceDriverEnum.java @@ -22,5 +22,6 @@ public enum DeviceDriverEnum { TRANSPORT_API, P4, IETF_NETWORK_TOPOLOGY, - ONF_TR_352 + ONF_TR_352, + XR } diff --git a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java index a02fbbca49319feb93de85efbe759a30a4ed3aa9..1161d9552e9794412d6c1ee78b89d2e2404ea3d7 100644 --- a/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java +++ b/src/automation/src/test/java/eu/teraflow/automation/SerializerTest.java @@ -1214,6 +1214,7 @@ class SerializerTest { Arguments.of( DeviceDriverEnum.ONF_TR_352, 
ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352), + Arguments.of(DeviceDriverEnum.XR, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR), Arguments.of( DeviceDriverEnum.UNDEFINED, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_UNDEFINED)); } diff --git a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java index 3c0d7ce36fcdc4e47697ba11a4ceb3d8e8cdea0c..fbbba62a2baa1c2fe2b3c3fe090883d6542996e4 100644 --- a/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java +++ b/src/automation/target/generated-sources/grpc/context/ContextOuterClass.java @@ -173,6 +173,10 @@ public final class ContextOuterClass { * <code>DEVICEDRIVER_ONF_TR_352 = 5;</code> */ DEVICEDRIVER_ONF_TR_352(5), + /** + * <code>DEVICEDRIVER_XR = 6;</code> + */ + DEVICEDRIVER_XR(6), UNRECOGNIZED(-1), ; @@ -204,6 +208,10 @@ public final class ContextOuterClass { * <code>DEVICEDRIVER_ONF_TR_352 = 5;</code> */ public static final int DEVICEDRIVER_ONF_TR_352_VALUE = 5; + /** + * <code>DEVICEDRIVER_XR = 6;</code> + */ + public static final int DEVICEDRIVER_XR_VALUE = 6; public final int getNumber() { @@ -236,6 +244,7 @@ public final class ContextOuterClass { case 3: return DEVICEDRIVER_P4; case 4: return DEVICEDRIVER_IETF_NETWORK_TOPOLOGY; case 5: return DEVICEDRIVER_ONF_TR_352; + case 6: return DEVICEDRIVER_XR; default: return null; } } @@ -62318,100 +62327,100 @@ public final class ContextOuterClass { "ntext.ContextId\022\025\n\rauthenticated\030\002 \001(\010*j" + "\n\rEventTypeEnum\022\027\n\023EVENTTYPE_UNDEFINED\020\000" + "\022\024\n\020EVENTTYPE_CREATE\020\001\022\024\n\020EVENTTYPE_UPDA" + - "TE\020\002\022\024\n\020EVENTTYPE_REMOVE\020\003*\305\001\n\020DeviceDri" + + "TE\020\002\022\024\n\020EVENTTYPE_REMOVE\020\003*\332\001\n\020DeviceDri" + "verEnum\022\032\n\026DEVICEDRIVER_UNDEFINED\020\000\022\033\n\027D" + 
"EVICEDRIVER_OPENCONFIG\020\001\022\036\n\032DEVICEDRIVER" + "_TRANSPORT_API\020\002\022\023\n\017DEVICEDRIVER_P4\020\003\022&\n" + "\"DEVICEDRIVER_IETF_NETWORK_TOPOLOGY\020\004\022\033\n" + - "\027DEVICEDRIVER_ONF_TR_352\020\005*\217\001\n\033DeviceOpe" + - "rationalStatusEnum\022%\n!DEVICEOPERATIONALS" + - "TATUS_UNDEFINED\020\000\022$\n DEVICEOPERATIONALST" + - "ATUS_DISABLED\020\001\022#\n\037DEVICEOPERATIONALSTAT" + - "US_ENABLED\020\002*\201\001\n\017ServiceTypeEnum\022\027\n\023SERV" + - "ICETYPE_UNKNOWN\020\000\022\024\n\020SERVICETYPE_L3NM\020\001\022" + - "\024\n\020SERVICETYPE_L2NM\020\002\022)\n%SERVICETYPE_TAP" + - "I_CONNECTIVITY_SERVICE\020\003*\250\001\n\021ServiceStat" + - "usEnum\022\033\n\027SERVICESTATUS_UNDEFINED\020\000\022\031\n\025S" + - "ERVICESTATUS_PLANNED\020\001\022\030\n\024SERVICESTATUS_" + - "ACTIVE\020\002\022!\n\035SERVICESTATUS_PENDING_REMOVA" + - "L\020\003\022\036\n\032SERVICESTATUS_SLA_VIOLATED\020\004*\251\001\n\017" + - "SliceStatusEnum\022\031\n\025SLICESTATUS_UNDEFINED" + - "\020\000\022\027\n\023SLICESTATUS_PLANNED\020\001\022\024\n\020SLICESTAT" + - "US_INIT\020\002\022\026\n\022SLICESTATUS_ACTIVE\020\003\022\026\n\022SLI" + - "CESTATUS_DEINIT\020\004\022\034\n\030SLICESTATUS_SLA_VIO" + - "LATED\020\005*]\n\020ConfigActionEnum\022\032\n\026CONFIGACT" + - "ION_UNDEFINED\020\000\022\024\n\020CONFIGACTION_SET\020\001\022\027\n" + - "\023CONFIGACTION_DELETE\020\002*\203\002\n\022IsolationLeve" + - "lEnum\022\020\n\014NO_ISOLATION\020\000\022\026\n\022PHYSICAL_ISOL" + - "ATION\020\001\022\025\n\021LOGICAL_ISOLATION\020\002\022\025\n\021PROCES" + - "S_ISOLATION\020\003\022\035\n\031PHYSICAL_MEMORY_ISOLATI" + - "ON\020\004\022\036\n\032PHYSICAL_NETWORK_ISOLATION\020\005\022\036\n\032" + - "VIRTUAL_RESOURCE_ISOLATION\020\006\022\037\n\033NETWORK_" + - "FUNCTIONS_ISOLATION\020\007\022\025\n\021SERVICE_ISOLATI" + - "ON\020\0102\331\023\n\016ContextService\022:\n\016ListContextId" + - 
"s\022\016.context.Empty\032\026.context.ContextIdLis" + - "t\"\000\0226\n\014ListContexts\022\016.context.Empty\032\024.co" + - "ntext.ContextList\"\000\0224\n\nGetContext\022\022.cont" + - "ext.ContextId\032\020.context.Context\"\000\0224\n\nSet" + - "Context\022\020.context.Context\032\022.context.Cont" + - "extId\"\000\0225\n\rRemoveContext\022\022.context.Conte" + - "xtId\032\016.context.Empty\"\000\022=\n\020GetContextEven" + - "ts\022\016.context.Empty\032\025.context.ContextEven" + - "t\"\0000\001\022@\n\017ListTopologyIds\022\022.context.Conte" + - "xtId\032\027.context.TopologyIdList\"\000\022=\n\016ListT" + - "opologies\022\022.context.ContextId\032\025.context." + - "TopologyList\"\000\0227\n\013GetTopology\022\023.context." + - "TopologyId\032\021.context.Topology\"\000\0227\n\013SetTo" + - "pology\022\021.context.Topology\032\023.context.Topo" + - "logyId\"\000\0227\n\016RemoveTopology\022\023.context.Top" + - "ologyId\032\016.context.Empty\"\000\022?\n\021GetTopology" + - "Events\022\016.context.Empty\032\026.context.Topolog" + - "yEvent\"\0000\001\0228\n\rListDeviceIds\022\016.context.Em" + - "pty\032\025.context.DeviceIdList\"\000\0224\n\013ListDevi" + - "ces\022\016.context.Empty\032\023.context.DeviceList" + - "\"\000\0221\n\tGetDevice\022\021.context.DeviceId\032\017.con" + - "text.Device\"\000\0221\n\tSetDevice\022\017.context.Dev" + - "ice\032\021.context.DeviceId\"\000\0223\n\014RemoveDevice" + - "\022\021.context.DeviceId\032\016.context.Empty\"\000\022;\n" + - "\017GetDeviceEvents\022\016.context.Empty\032\024.conte" + - "xt.DeviceEvent\"\0000\001\0224\n\013ListLinkIds\022\016.cont" + - "ext.Empty\032\023.context.LinkIdList\"\000\0220\n\tList" + - "Links\022\016.context.Empty\032\021.context.LinkList" + - "\"\000\022+\n\007GetLink\022\017.context.LinkId\032\r.context" + - ".Link\"\000\022+\n\007SetLink\022\r.context.Link\032\017.cont" + - "ext.LinkId\"\000\022/\n\nRemoveLink\022\017.context.Lin" + - 
"kId\032\016.context.Empty\"\000\0227\n\rGetLinkEvents\022\016" + - ".context.Empty\032\022.context.LinkEvent\"\0000\001\022>" + - "\n\016ListServiceIds\022\022.context.ContextId\032\026.c" + - "ontext.ServiceIdList\"\000\022:\n\014ListServices\022\022" + - ".context.ContextId\032\024.context.ServiceList" + - "\"\000\0224\n\nGetService\022\022.context.ServiceId\032\020.c" + - "ontext.Service\"\000\0224\n\nSetService\022\020.context" + - ".Service\032\022.context.ServiceId\"\000\0226\n\014UnsetS" + - "ervice\022\020.context.Service\032\022.context.Servi" + - "ceId\"\000\0225\n\rRemoveService\022\022.context.Servic" + - "eId\032\016.context.Empty\"\000\022=\n\020GetServiceEvent" + - "s\022\016.context.Empty\032\025.context.ServiceEvent" + - "\"\0000\001\022:\n\014ListSliceIds\022\022.context.ContextId" + - "\032\024.context.SliceIdList\"\000\0226\n\nListSlices\022\022" + - ".context.ContextId\032\022.context.SliceList\"\000" + - "\022.\n\010GetSlice\022\020.context.SliceId\032\016.context" + - ".Slice\"\000\022.\n\010SetSlice\022\016.context.Slice\032\020.c" + - "ontext.SliceId\"\000\0220\n\nUnsetSlice\022\016.context" + - ".Slice\032\020.context.SliceId\"\000\0221\n\013RemoveSlic" + - "e\022\020.context.SliceId\032\016.context.Empty\"\000\0229\n" + - "\016GetSliceEvents\022\016.context.Empty\032\023.contex" + - "t.SliceEvent\"\0000\001\022D\n\021ListConnectionIds\022\022." 
+ - "context.ServiceId\032\031.context.ConnectionId" + - "List\"\000\022@\n\017ListConnections\022\022.context.Serv" + - "iceId\032\027.context.ConnectionList\"\000\022=\n\rGetC" + - "onnection\022\025.context.ConnectionId\032\023.conte" + - "xt.Connection\"\000\022=\n\rSetConnection\022\023.conte" + - "xt.Connection\032\025.context.ConnectionId\"\000\022;" + - "\n\020RemoveConnection\022\025.context.ConnectionI" + - "d\032\016.context.Empty\"\000\022C\n\023GetConnectionEven" + - "ts\022\016.context.Empty\032\030.context.ConnectionE" + - "vent\"\0000\001b\006proto3" + "\027DEVICEDRIVER_ONF_TR_352\020\005\022\023\n\017DEVICEDRIV" + + "ER_XR\020\006*\217\001\n\033DeviceOperationalStatusEnum\022" + + "%\n!DEVICEOPERATIONALSTATUS_UNDEFINED\020\000\022$" + + "\n DEVICEOPERATIONALSTATUS_DISABLED\020\001\022#\n\037" + + "DEVICEOPERATIONALSTATUS_ENABLED\020\002*\201\001\n\017Se" + + "rviceTypeEnum\022\027\n\023SERVICETYPE_UNKNOWN\020\000\022\024" + + "\n\020SERVICETYPE_L3NM\020\001\022\024\n\020SERVICETYPE_L2NM" + + "\020\002\022)\n%SERVICETYPE_TAPI_CONNECTIVITY_SERV" + + "ICE\020\003*\250\001\n\021ServiceStatusEnum\022\033\n\027SERVICEST" + + "ATUS_UNDEFINED\020\000\022\031\n\025SERVICESTATUS_PLANNE" + + "D\020\001\022\030\n\024SERVICESTATUS_ACTIVE\020\002\022!\n\035SERVICE" + + "STATUS_PENDING_REMOVAL\020\003\022\036\n\032SERVICESTATU" + + "S_SLA_VIOLATED\020\004*\251\001\n\017SliceStatusEnum\022\031\n\025" + + "SLICESTATUS_UNDEFINED\020\000\022\027\n\023SLICESTATUS_P" + + "LANNED\020\001\022\024\n\020SLICESTATUS_INIT\020\002\022\026\n\022SLICES" + + "TATUS_ACTIVE\020\003\022\026\n\022SLICESTATUS_DEINIT\020\004\022\034" + + "\n\030SLICESTATUS_SLA_VIOLATED\020\005*]\n\020ConfigAc" + + "tionEnum\022\032\n\026CONFIGACTION_UNDEFINED\020\000\022\024\n\020" + + "CONFIGACTION_SET\020\001\022\027\n\023CONFIGACTION_DELET" + + "E\020\002*\203\002\n\022IsolationLevelEnum\022\020\n\014NO_ISOLATI" + + "ON\020\000\022\026\n\022PHYSICAL_ISOLATION\020\001\022\025\n\021LOGICAL_" + + 
"ISOLATION\020\002\022\025\n\021PROCESS_ISOLATION\020\003\022\035\n\031PH" + + "YSICAL_MEMORY_ISOLATION\020\004\022\036\n\032PHYSICAL_NE" + + "TWORK_ISOLATION\020\005\022\036\n\032VIRTUAL_RESOURCE_IS" + + "OLATION\020\006\022\037\n\033NETWORK_FUNCTIONS_ISOLATION" + + "\020\007\022\025\n\021SERVICE_ISOLATION\020\0102\331\023\n\016ContextSer" + + "vice\022:\n\016ListContextIds\022\016.context.Empty\032\026" + + ".context.ContextIdList\"\000\0226\n\014ListContexts" + + "\022\016.context.Empty\032\024.context.ContextList\"\000" + + "\0224\n\nGetContext\022\022.context.ContextId\032\020.con" + + "text.Context\"\000\0224\n\nSetContext\022\020.context.C" + + "ontext\032\022.context.ContextId\"\000\0225\n\rRemoveCo" + + "ntext\022\022.context.ContextId\032\016.context.Empt" + + "y\"\000\022=\n\020GetContextEvents\022\016.context.Empty\032" + + "\025.context.ContextEvent\"\0000\001\022@\n\017ListTopolo" + + "gyIds\022\022.context.ContextId\032\027.context.Topo" + + "logyIdList\"\000\022=\n\016ListTopologies\022\022.context" + + ".ContextId\032\025.context.TopologyList\"\000\0227\n\013G" + + "etTopology\022\023.context.TopologyId\032\021.contex" + + "t.Topology\"\000\0227\n\013SetTopology\022\021.context.To" + + "pology\032\023.context.TopologyId\"\000\0227\n\016RemoveT" + + "opology\022\023.context.TopologyId\032\016.context.E" + + "mpty\"\000\022?\n\021GetTopologyEvents\022\016.context.Em" + + "pty\032\026.context.TopologyEvent\"\0000\001\0228\n\rListD" + + "eviceIds\022\016.context.Empty\032\025.context.Devic" + + "eIdList\"\000\0224\n\013ListDevices\022\016.context.Empty" + + "\032\023.context.DeviceList\"\000\0221\n\tGetDevice\022\021.c" + + "ontext.DeviceId\032\017.context.Device\"\000\0221\n\tSe" + + "tDevice\022\017.context.Device\032\021.context.Devic" + + "eId\"\000\0223\n\014RemoveDevice\022\021.context.DeviceId" + + "\032\016.context.Empty\"\000\022;\n\017GetDeviceEvents\022\016." 
+ + "context.Empty\032\024.context.DeviceEvent\"\0000\001\022" + + "4\n\013ListLinkIds\022\016.context.Empty\032\023.context" + + ".LinkIdList\"\000\0220\n\tListLinks\022\016.context.Emp" + + "ty\032\021.context.LinkList\"\000\022+\n\007GetLink\022\017.con" + + "text.LinkId\032\r.context.Link\"\000\022+\n\007SetLink\022" + + "\r.context.Link\032\017.context.LinkId\"\000\022/\n\nRem" + + "oveLink\022\017.context.LinkId\032\016.context.Empty" + + "\"\000\0227\n\rGetLinkEvents\022\016.context.Empty\032\022.co" + + "ntext.LinkEvent\"\0000\001\022>\n\016ListServiceIds\022\022." + + "context.ContextId\032\026.context.ServiceIdLis" + + "t\"\000\022:\n\014ListServices\022\022.context.ContextId\032" + + "\024.context.ServiceList\"\000\0224\n\nGetService\022\022." + + "context.ServiceId\032\020.context.Service\"\000\0224\n" + + "\nSetService\022\020.context.Service\032\022.context." + + "ServiceId\"\000\0226\n\014UnsetService\022\020.context.Se" + + "rvice\032\022.context.ServiceId\"\000\0225\n\rRemoveSer" + + "vice\022\022.context.ServiceId\032\016.context.Empty" + + "\"\000\022=\n\020GetServiceEvents\022\016.context.Empty\032\025" + + ".context.ServiceEvent\"\0000\001\022:\n\014ListSliceId" + + "s\022\022.context.ContextId\032\024.context.SliceIdL" + + "ist\"\000\0226\n\nListSlices\022\022.context.ContextId\032" + + "\022.context.SliceList\"\000\022.\n\010GetSlice\022\020.cont" + + "ext.SliceId\032\016.context.Slice\"\000\022.\n\010SetSlic" + + "e\022\016.context.Slice\032\020.context.SliceId\"\000\0220\n" + + "\nUnsetSlice\022\016.context.Slice\032\020.context.Sl" + + "iceId\"\000\0221\n\013RemoveSlice\022\020.context.SliceId" + + "\032\016.context.Empty\"\000\0229\n\016GetSliceEvents\022\016.c" + + "ontext.Empty\032\023.context.SliceEvent\"\0000\001\022D\n" + + "\021ListConnectionIds\022\022.context.ServiceId\032\031" + + ".context.ConnectionIdList\"\000\022@\n\017ListConne" + + "ctions\022\022.context.ServiceId\032\027.context.Con" + + 
"nectionList\"\000\022=\n\rGetConnection\022\025.context" + + ".ConnectionId\032\023.context.Connection\"\000\022=\n\r" + + "SetConnection\022\023.context.Connection\032\025.con" + + "text.ConnectionId\"\000\022;\n\020RemoveConnection\022" + + "\025.context.ConnectionId\032\016.context.Empty\"\000" + + "\022C\n\023GetConnectionEvents\022\016.context.Empty\032" + + "\030.context.ConnectionEvent\"\0000\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/src/common/Constants.py b/src/common/Constants.py index a536ef60047eb1f210f8d98d207134d377adcbed..964d904da704324d6def548103675e815743d818 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -30,8 +30,9 @@ DEFAULT_HTTP_BIND_ADDRESS = '0.0.0.0' DEFAULT_METRICS_PORT = 9192 # Default context and topology UUIDs -DEFAULT_CONTEXT_UUID = 'admin' -DEFAULT_TOPOLOGY_UUID = 'admin' +DEFAULT_CONTEXT_UUID = 'admin' +DEFAULT_TOPOLOGY_UUID = 'admin' # contains the detailed local topology +INTERDOMAIN_TOPOLOGY_UUID = 'inter' # contains the abstract inter-domain topology # Default service names class ServiceNameEnum(Enum): @@ -50,7 +51,7 @@ class ServiceNameEnum(Enum): WEBUI = 'webui' # Used for test and debugging only - DLT_GATEWAY = 'dlt-gateway' + DLT_GATEWAY = 'dltgateway' # Default gRPC service ports DEFAULT_SERVICE_GRPC_PORTS = { diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py index 08f18dd400296baf373f61901493aa0427e4cf1f..c353708995cd5d8e4a7e2fde8d9bdd03732008eb 100644 --- a/src/common/DeviceTypes.py +++ b/src/common/DeviceTypes.py @@ -16,9 +16,12 @@ from enum import Enum class DeviceTypeEnum(Enum): + # Abstractions + NETWORK = 'network' + # Emulated device types EMULATED_DATACENTER = 'emu-datacenter' - EMULATED_MICROVAWE_RADIO_SYSTEM = 'emu-microwave-radio-system' + EMULATED_MICROWAVE_RADIO_SYSTEM = 'emu-microwave-radio-system' EMULATED_OPEN_LINE_SYSTEM = 'emu-open-line-system' EMULATED_OPTICAL_ROADM = 
'emu-optical-roadm' EMULATED_OPTICAL_TRANSPONDER = 'emu-optical-transponder' @@ -28,10 +31,11 @@ class DeviceTypeEnum(Enum): # Real device types DATACENTER = 'datacenter' - MICROVAWE_RADIO_SYSTEM = 'microwave-radio-system' + MICROWAVE_RADIO_SYSTEM = 'microwave-radio-system' OPEN_LINE_SYSTEM = 'open-line-system' OPTICAL_ROADM = 'optical-roadm' OPTICAL_TRANSPONDER = 'optical-transponder' P4_SWITCH = 'p4-switch' PACKET_ROUTER = 'packet-router' PACKET_SWITCH = 'packet-switch' + XR_CONSTELLATION = 'xr-constellation' \ No newline at end of file diff --git a/src/common/database/api/context/slice/__init__.py b/src/common/database/api/context/slice/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/src/common/tests/LoadScenario.py b/src/common/tests/LoadScenario.py new file mode 100644 index 0000000000000000000000000000000000000000..3c3940e67b5772f3ba3ec0634c49f26b92bbc571 --- /dev/null +++ b/src/common/tests/LoadScenario.py @@ -0,0 +1,50 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from slice.client.SliceClient import SliceClient + +LOGGER = logging.getLogger(__name__) +LOGGERS = { + 'success': LOGGER.info, + 'danger' : LOGGER.error, + 'error' : LOGGER.error, +} + +def load_scenario_from_descriptor( + descriptor_file : str, context_client : ContextClient, device_client : DeviceClient, + service_client : ServiceClient, slice_client : SliceClient +) -> DescriptorLoader: + with open(descriptor_file, 'r', encoding='UTF-8') as f: + descriptors = f.read() + + descriptor_loader = DescriptorLoader( + descriptors, + context_client=context_client, device_client=device_client, + service_client=service_client, slice_client=slice_client) + results = descriptor_loader.process() + + num_errors = 0 + for message,level in compose_notifications(results): + LOGGERS.get(level)(message) + if level != 'success': num_errors += 1 + if num_errors > 0: + MSG = 'Failed to load descriptors in file {:s}' + raise Exception(MSG.format(str(descriptor_file))) + + return descriptor_loader \ No newline at end of file diff --git a/src/common/tests/MockServicerImpl_Context.py b/src/common/tests/MockServicerImpl_Context.py index c56ed382adad4b2daa2e3d61575d2973f02bfbe2..27ff45fc58c675fe28090a186059244e2f1178c1 100644 --- a/src/common/tests/MockServicerImpl_Context.py +++ b/src/common/tests/MockServicerImpl_Context.py @@ -115,7 +115,7 @@ class MockServicerImpl_Context(ContextServiceServicer): def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId: LOGGER.info('[SetContext] request={:s}'.format(grpc_message_to_json_string(request))) - return self._set(request, 'context', request.context_uuid.uuid, 'context_id', TOPIC_CONTEXT) + return self._set(request, 'context', 
request.context_id.context_uuid.uuid, 'context_id', TOPIC_CONTEXT) def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveContext] request={:s}'.format(grpc_message_to_json_string(request))) @@ -233,17 +233,19 @@ class MockServicerImpl_Context(ContextServiceServicer): def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId: LOGGER.info('[SetSlice] request={:s}'.format(grpc_message_to_json_string(request))) - return set_entry( - self.database, 'slice[{:s}]'.format(str(request.slice_id.context_id.context_uuid.uuid)), - request.slice_id.slice_uuid.uuid, request).slice_id + container_name = 'slice[{:s}]'.format(str(request.slice_id.context_id.context_uuid.uuid)) + slice_uuid = request.slice_id.slice_uuid.uuid + return self._set(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE) def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveSlice] request={:s}'.format(grpc_message_to_json_string(request))) container_name = 'slice[{:s}]'.format(str(request.context_id.context_uuid.uuid)) - return del_entry(context, self.database, container_name, request.slice_uuid.uuid) + slice_uuid = request.slice_uuid.uuid + return self._del(request, container_name, slice_uuid, 'slice_id', TOPIC_SLICE, context) def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]: LOGGER.info('[GetSliceEvents] request={:s}'.format(grpc_message_to_json_string(request))) + for message in self.msg_broker.consume({TOPIC_SLICE}): yield SliceEvent(**json.loads(message.content)) # ----- Service ---------------------------------------------------------------------------------------------------- @@ -272,7 +274,7 @@ class MockServicerImpl_Context(ContextServiceServicer): def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[RemoveService] request={:s}'.format(grpc_message_to_json_string(request))) 
container_name = 'service[{:s}]'.format(str(request.context_id.context_uuid.uuid)) - service_uuid = request.service_id.service_uuid.uuid + service_uuid = request.service_uuid.uuid return self._del(request, container_name, service_uuid, 'service_id', TOPIC_SERVICE, context) def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]: diff --git a/src/common/tests/MockServicerImpl_DltGateway.py b/src/common/tests/MockServicerImpl_DltGateway.py index 2d750168238b2a041badd1974f27e57f62363d90..f106519b2695cda519e95a79e7b559dd24818108 100644 --- a/src/common/tests/MockServicerImpl_DltGateway.py +++ b/src/common/tests/MockServicerImpl_DltGateway.py @@ -36,6 +36,10 @@ class AlreadyExistsException(Exception): class DoesNotExistException(Exception): pass +MSG_NOT_EXISTS = 'RecordId({:s}, {:s}, {:s}) Does Not Exist' +MSG_ALREADY_EXISTS = 'RecordId({:s}, {:s}, {:s}) Already Exists' +MSG_OPERATION_NOT_IMPLEMENTED = 'DltRecordOperationEnum({:s}) Not Implemented' + class MockServicerImpl_DltGateway(DltGatewayServiceServicer): def __init__(self): LOGGER.info('[__init__] Creating Servicer...') @@ -43,16 +47,12 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer): self.msg_broker = MockMessageBroker() LOGGER.info('[__init__] Servicer Created') - def __get_record(self, record_id : DltRecordId, should_exist : bool) -> Optional[Dict]: + def __get_record(self, record_id : DltRecordId) -> Optional[Dict]: domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '') records_domain : Dict[str, Dict] = self.records.setdefault(domain_uuid, {}) records_type : Dict[str, Dict] = records_domain.setdefault(str_type, {}) record : Optional[Dict] = records_type.get(record_uuid) - if should_exist and record is None: - raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid)) - elif not should_exist 
and record is not None: - raise AlreadyExistsException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid)) return record def __set_record(self, record_id : DltRecordId, should_exist : bool, data_json : str) -> None: @@ -62,10 +62,10 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer): records_type : Dict[str, Dict] = records_domain.setdefault(str_type, {}) record : Optional[Dict] = records_type.get(record_uuid) if should_exist and record is None: - raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid)) + raise DoesNotExistException(MSG_NOT_EXISTS.format(domain_uuid, str_type, record_uuid)) elif not should_exist and record is not None: - raise AlreadyExistsException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid)) - records_type[record_uuid] = json.loads(data_json) + raise AlreadyExistsException(MSG_ALREADY_EXISTS.format(domain_uuid, str_type, record_uuid)) + records_type[record_uuid] = data_json def __del_record(self, record_id : DltRecordId) -> None: domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid @@ -74,7 +74,7 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer): records_type : Dict[str, Dict] = records_domain.setdefault(str_type, {}) record : Optional[Dict] = records_type.get(record_uuid) if record is None: - raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid)) + raise DoesNotExistException(MSG_NOT_EXISTS.format(domain_uuid, str_type, record_uuid)) records_type.discard(record_uuid) def __publish(self, operation : DltRecordOperationEnum, record_id : DltRecordId) -> None: @@ -99,14 +99,14 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer): try: operation : DltRecordOperationEnum = request.operation if operation == DLTRECORDOPERATION_ADD: - self.__set_record(record_id, False, request.data_json) + self.__set_record(record_id, False, 
json.loads(request.data_json)) elif operation == DLTRECORDOPERATION_UPDATE: - self.__set_record(record_id, True, request.data_json) + self.__set_record(record_id, True, json.loads(request.data_json)) elif operation == DLTRECORDOPERATION_DELETE: self.__del_record(record_id) else: str_operation = DltRecordOperationEnum.Name(operation).upper().replace('DLTRECORDOPERATION_', '') - raise NotImplementedError('DltRecordOperationEnum({:s})'.format(str_operation)) + raise NotImplementedError(MSG_OPERATION_NOT_IMPLEMENTED.format(str_operation)) self.__publish(operation, record_id) response.status = DLTRECORDSTATUS_SUCCEEDED except Exception as e: # pylint: disable=broad-except @@ -117,11 +117,12 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer): def GetFromDlt(self, request : DltRecordId, context : grpc.ServicerContext) -> DltRecord: LOGGER.info('[GetFromDlt] request={:s}'.format(grpc_message_to_json_string(request))) - record = self.__get_record(request, True) + record = self.__get_record(request) response = DltRecord() - response.record_id.CopyFrom(request) # pylint: disable=no-member - response.operation = DLTRECORDOPERATION_UNDEFINED - response.data_json = json.dumps(record, sort_keys=True) + if record is not None: + response.record_id.CopyFrom(request) # pylint: disable=no-member + response.operation = DLTRECORDOPERATION_UNDEFINED + response.data_json = json.dumps(record, sort_keys=True) LOGGER.info('[GetFromDlt] response={:s}'.format(grpc_message_to_json_string(response))) return response diff --git a/src/common/tools/context_queries/CheckType.py b/src/common/tools/context_queries/CheckType.py new file mode 100644 index 0000000000000000000000000000000000000000..f53ad16906336182311d1d98fec428f1472bf748 --- /dev/null +++ b/src/common/tools/context_queries/CheckType.py @@ -0,0 +1,28 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Union +from common.DeviceTypes import DeviceTypeEnum + +def device_type_is_datacenter(device_type : Union[str, DeviceTypeEnum]) -> bool: + return device_type in { + DeviceTypeEnum.DATACENTER, DeviceTypeEnum.DATACENTER.value, + DeviceTypeEnum.EMULATED_DATACENTER, DeviceTypeEnum.EMULATED_DATACENTER.value + } + +def device_type_is_network(device_type : Union[str, DeviceTypeEnum]) -> bool: + return device_type in {DeviceTypeEnum.NETWORK, DeviceTypeEnum.NETWORK.value} + +def endpoint_type_is_border(endpoint_type : str) -> bool: + return str(endpoint_type).endswith('/border') diff --git a/src/common/tools/context_queries/Context.py b/src/common/tools/context_queries/Context.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0d3be2b7c1890e486492ad55add19a17591353 --- /dev/null +++ b/src/common/tools/context_queries/Context.py @@ -0,0 +1,25 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from common.proto.context_pb2 import Context, Empty +from common.tools.object_factory.Context import json_context +from context.client.ContextClient import ContextClient + +def create_context( + context_client : ContextClient, context_uuid : str +) -> None: + existing_context_ids = context_client.ListContextIds(Empty()) + existing_context_uuids = {context_id.context_uuid.uuid for context_id in existing_context_ids.context_ids} + if context_uuid in existing_context_uuids: return + context_client.SetContext(Context(**json_context(context_uuid))) diff --git a/src/common/tools/context_queries/Device.py b/src/common/tools/context_queries/Device.py new file mode 100644 index 0000000000000000000000000000000000000000..e5b205d46185e12fa51a2cbd8146342abe5bed38 --- /dev/null +++ b/src/common/tools/context_queries/Device.py @@ -0,0 +1,59 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import List, Set +from common.proto.context_pb2 import ContextId, Device, Empty, Topology, TopologyId +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient + +def get_existing_device_uuids(context_client : ContextClient) -> Set[str]: + existing_device_ids = context_client.ListDeviceIds(Empty()) + existing_device_uuids = {device_id.device_uuid.uuid for device_id in existing_device_ids.device_ids} + return existing_device_uuids + +def add_device_to_topology( + context_client : ContextClient, context_id : ContextId, topology_uuid : str, device_uuid : str +) -> bool: + topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id)) + topology_ro = context_client.GetTopology(topology_id) + device_uuids = {device_id.device_uuid.uuid for device_id in topology_ro.device_ids} + if device_uuid in device_uuids: return False # already existed + + topology_rw = Topology() + topology_rw.CopyFrom(topology_ro) + topology_rw.device_ids.add().device_uuid.uuid = device_uuid # pylint: disable=no-member + context_client.SetTopology(topology_rw) + return True + +def get_uuids_of_devices_in_topology( + context_client : ContextClient, context_id : ContextId, topology_uuid : str +) -> List[str]: + topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id)) + topology = context_client.GetTopology(topology_id) + device_uuids = [device_id.device_uuid.uuid for device_id in topology.device_ids] + return device_uuids + +def get_devices_in_topology( + context_client : ContextClient, context_id : ContextId, topology_uuid : str +) -> List[Device]: + device_uuids = get_uuids_of_devices_in_topology(context_client, context_id, topology_uuid) + + all_devices = context_client.ListDevices(Empty()) + devices_in_topology = list() + for device in all_devices.devices: + device_uuid = device.device_id.device_uuid.uuid + if device_uuid not in device_uuids: continue + 
devices_in_topology.append(device) + + return devices_in_topology diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py new file mode 100644 index 0000000000000000000000000000000000000000..0a202ccd810ed50beca4bb9a7b4441305623f1ed --- /dev/null +++ b/src/common/tools/context_queries/InterDomain.py @@ -0,0 +1,256 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Dict, List, Set, Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import ContextId, Device, Empty, EndPointId, ServiceTypeEnum, Slice +from common.proto.pathcomp_pb2 import PathCompRequest +from common.tools.context_queries.CheckType import device_type_is_network +from common.tools.context_queries.Device import get_devices_in_topology +from common.tools.context_queries.Topology import get_topology +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from pathcomp.frontend.client.PathCompClient import PathCompClient + +LOGGER = logging.getLogger(__name__) + +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) +DATACENTER_DEVICE_TYPES = {DeviceTypeEnum.DATACENTER, 
DeviceTypeEnum.EMULATED_DATACENTER} + +def get_local_device_uuids(context_client : ContextClient) -> Set[str]: + topologies = context_client.ListTopologies(ADMIN_CONTEXT_ID) + topologies = {topology.topology_id.topology_uuid.uuid : topology for topology in topologies.topologies} + LOGGER.info('[get_local_device_uuids] topologies.keys()={:s}'.format(str(topologies.keys()))) + + local_topology_uuids = set(topologies.keys()) + local_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID) + LOGGER.info('[get_local_device_uuids] local_topology_uuids={:s}'.format(str(local_topology_uuids))) + + local_device_uuids = set() + + # add topology names except DEFAULT_TOPOLOGY_UUID and INTERDOMAIN_TOPOLOGY_UUID; they are abstracted as a + # local device in inter-domain and the name of the topology is used as abstract device name + for local_topology_uuid in local_topology_uuids: + if local_topology_uuid == DEFAULT_TOPOLOGY_UUID: continue + local_device_uuids.add(local_topology_uuid) + + # add physical devices in the local topologies + for local_topology_uuid in local_topology_uuids: + topology_device_ids = topologies[local_topology_uuid].device_ids + topology_device_uuids = {device_id.device_uuid.uuid for device_id in topology_device_ids} + LOGGER.info('[get_local_device_uuids] [loop] local_topology_uuid={:s} topology_device_uuids={:s}'.format( + str(local_topology_uuid), str(topology_device_uuids))) + local_device_uuids.update(topology_device_uuids) + + LOGGER.info('[get_local_device_uuids] local_device_uuids={:s}'.format(str(local_device_uuids))) + return local_device_uuids + +def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]: + context_uuid = DEFAULT_CONTEXT_UUID + topology_uuid = INTERDOMAIN_TOPOLOGY_UUID + interdomain_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid) + if interdomain_topology is None: + MSG = '[get_interdomain_device_uuids] {:s}/{:s} topology not found' + LOGGER.warning(MSG.format(context_uuid, 
topology_uuid)) + return set() + + # add abstracted devices in the interdomain topology + interdomain_device_ids = interdomain_topology.device_ids + interdomain_device_uuids = {device_id.device_uuid.uuid for device_id in interdomain_device_ids} + LOGGER.info('[get_interdomain_device_uuids] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) + return interdomain_device_uuids + +def get_local_domain_devices(context_client : ContextClient) -> List[Device]: + local_device_uuids = get_local_device_uuids(context_client) + all_devices = context_client.ListDevices(Empty()) + local_domain_devices = list() + for device in all_devices.devices: + if not device_type_is_network(device.device_type): continue + device_uuid = device.device_id.device_uuid.uuid + if device_uuid not in local_device_uuids: continue + local_domain_devices.append(device) + return local_domain_devices + +def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: + interdomain_device_uuids = get_interdomain_device_uuids(context_client) + LOGGER.info('[is_inter_domain] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) + non_interdomain_endpoint_ids = [ + endpoint_id + for endpoint_id in endpoint_ids + if endpoint_id.device_id.device_uuid.uuid not in interdomain_device_uuids + ] + str_non_interdomain_endpoint_ids = [ + (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) + for endpoint_id in non_interdomain_endpoint_ids + ] + LOGGER.info('[is_inter_domain] non_interdomain_endpoint_ids={:s}'.format(str(str_non_interdomain_endpoint_ids))) + is_inter_domain_ = len(non_interdomain_endpoint_ids) == 0 + LOGGER.info('[is_inter_domain] is_inter_domain={:s}'.format(str(is_inter_domain_))) + return is_inter_domain_ + +def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: + local_device_uuids = get_local_device_uuids(context_client) + LOGGER.info('[is_multi_domain] 
local_device_uuids={:s}'.format(str(local_device_uuids))) + remote_endpoint_ids = [ + endpoint_id + for endpoint_id in endpoint_ids + if endpoint_id.device_id.device_uuid.uuid not in local_device_uuids + ] + str_remote_endpoint_ids = [ + (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) + for endpoint_id in remote_endpoint_ids + ] + LOGGER.info('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids))) + is_multi_domain_ = len(remote_endpoint_ids) > 0 + LOGGER.info('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_))) + return is_multi_domain_ + +def compute_interdomain_path( + pathcomp_client : PathCompClient, slice_ : Slice +) -> List[Tuple[str, List[EndPointId]]]: + context_uuid = slice_.slice_id.context_id.context_uuid.uuid + slice_uuid = slice_.slice_id.slice_uuid.uuid + + pathcomp_req = PathCompRequest() + pathcomp_req.shortest_path.Clear() # pylint: disable=no-member + pathcomp_req_svc = pathcomp_req.services.add() # pylint: disable=no-member + pathcomp_req_svc.service_id.context_id.context_uuid.uuid = context_uuid + pathcomp_req_svc.service_id.service_uuid.uuid = slice_uuid + pathcomp_req_svc.service_type = ServiceTypeEnum.SERVICETYPE_L2NM + + for endpoint_id in slice_.slice_endpoint_ids: + service_endpoint_id = pathcomp_req_svc.service_endpoint_ids.add() + service_endpoint_id.CopyFrom(endpoint_id) + + constraint_bw = pathcomp_req_svc.service_constraints.add() + constraint_bw.custom.constraint_type = 'bandwidth[gbps]' + constraint_bw.custom.constraint_value = '10.0' + + constraint_lat = pathcomp_req_svc.service_constraints.add() + constraint_lat.custom.constraint_type = 'latency[ms]' + constraint_lat.custom.constraint_value = '100.0' + + LOGGER.info('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req))) + pathcomp_rep = pathcomp_client.Compute(pathcomp_req) + LOGGER.info('pathcomp_rep = {:s}'.format(grpc_message_to_json_string(pathcomp_rep))) + + service = next(iter([ + 
service + for service in pathcomp_rep.services + if service.service_id == pathcomp_req_svc.service_id + ]), None) + if service is None: + str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id) + raise Exception('Service({:s}) not found'.format(str_service_id)) + + connection = next(iter([ + connection + for connection in pathcomp_rep.connections + if connection.service_id == pathcomp_req_svc.service_id + ]), None) + if connection is None: + str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id) + raise Exception('Connection for Service({:s}) not found'.format(str_service_id)) + + domain_list : List[str] = list() + domain_to_endpoint_ids : Dict[str, List[EndPointId]] = dict() + for endpoint_id in connection.path_hops_endpoint_ids: + device_uuid = endpoint_id.device_id.device_uuid.uuid + #endpoint_uuid = endpoint_id.endpoint_uuid.uuid + if device_uuid not in domain_to_endpoint_ids: domain_list.append(device_uuid) + domain_to_endpoint_ids.setdefault(device_uuid, []).append(endpoint_id) + + return [ + (domain_uuid, domain_to_endpoint_ids.get(domain_uuid)) + for domain_uuid in domain_list + ] + +def get_device_to_domain_map(context_client : ContextClient) -> Dict[str, str]: + devices_to_domains : Dict[str, str] = dict() + contexts = context_client.ListContexts(Empty()) + for context in contexts.contexts: + context_id = context.context_id + context_uuid = context_id.context_uuid.uuid + topologies = context_client.ListTopologies(context_id) + if context_uuid == DEFAULT_CONTEXT_UUID: + for topology in topologies.topologies: + topology_id = topology.topology_id + topology_uuid = topology_id.topology_uuid.uuid + if topology_uuid in {DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID}: continue + + # add topology names except DEFAULT_TOPOLOGY_UUID and INTERDOMAIN_TOPOLOGY_UUID; they are + # abstracted as a local device in inter-domain and the name of the topology is used as + # abstract device name + devices_to_domains[topology_uuid] = 
topology_uuid + + # add physical devices in the local topology + for device_id in topology.device_ids: + device_uuid = device_id.device_uuid.uuid + devices_to_domains[device_uuid] = topology_uuid + else: + # for each topology in a remote context + for topology in topologies.topologies: + topology_id = topology.topology_id + topology_uuid = topology_id.topology_uuid.uuid + + # if topology is not interdomain + if topology_uuid in {INTERDOMAIN_TOPOLOGY_UUID}: continue + + # add devices to the remote domain list + for device_id in topology.device_ids: + device_uuid = device_id.device_uuid.uuid + devices_to_domains[device_uuid] = context_uuid + + return devices_to_domains + +def compute_traversed_domains( + context_client : ContextClient, interdomain_path : List[Tuple[str, List[EndPointId]]] +) -> List[Tuple[str, bool, List[EndPointId]]]: + + local_device_uuids = get_local_device_uuids(context_client) + LOGGER.info('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids))) + + interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) + interdomain_devices = { + device.device_id.device_uuid.uuid : device + for device in interdomain_devices + } + + devices_to_domains = get_device_to_domain_map(context_client) + LOGGER.info('[compute_traversed_domains] devices_to_domains={:s}'.format(str(devices_to_domains))) + + traversed_domains : List[Tuple[str, bool, List[EndPointId]]] = list() + domains_dict : Dict[str, Tuple[str, bool, List[EndPointId]]] = dict() + for device_uuid, endpoint_ids in interdomain_path: + domain_uuid = devices_to_domains.get(device_uuid, '---') + domain = domains_dict.get(domain_uuid) + if domain is None: + is_local_domain = domain_uuid in local_device_uuids + domain = (domain_uuid, is_local_domain, []) + traversed_domains.append(domain) + domains_dict[domain_uuid] = domain + domain[2].extend(endpoint_ids) + + str_traversed_domains = [ + (domain_uuid, is_local_domain, [ + 
(endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) + for endpoint_id in endpoint_ids + ]) + for domain_uuid,is_local_domain,endpoint_ids in traversed_domains + ] + LOGGER.info('[compute_traversed_domains] devices_to_domains={:s}'.format(str(str_traversed_domains))) + return traversed_domains diff --git a/src/common/tools/context_queries/Link.py b/src/common/tools/context_queries/Link.py new file mode 100644 index 0000000000000000000000000000000000000000..abc5fa91af8d24c8a3cdf18fda0e7680da9143a7 --- /dev/null +++ b/src/common/tools/context_queries/Link.py @@ -0,0 +1,59 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import List, Set +from common.proto.context_pb2 import ContextId, Empty, Link, Topology, TopologyId +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient + +def get_existing_link_uuids(context_client : ContextClient) -> Set[str]: + existing_link_ids = context_client.ListLinkIds(Empty()) + existing_link_uuids = {link_id.link_uuid.uuid for link_id in existing_link_ids.link_ids} + return existing_link_uuids + +def add_link_to_topology( + context_client : ContextClient, context_id : ContextId, topology_uuid : str, link_uuid : str +) -> bool: + topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id)) + topology_ro = context_client.GetTopology(topology_id) + link_uuids = {link_id.link_uuid.uuid for link_id in topology_ro.link_ids} + if link_uuid in link_uuids: return False # already existed + + topology_rw = Topology() + topology_rw.CopyFrom(topology_ro) + topology_rw.link_ids.add().link_uuid.uuid = link_uuid # pylint: disable=no-member + context_client.SetTopology(topology_rw) + return True + +def get_uuids_of_links_in_topology( + context_client : ContextClient, context_id : ContextId, topology_uuid : str +) -> List[str]: + topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id)) + topology = context_client.GetTopology(topology_id) + link_uuids = [link_id.link_uuid.uuid for link_id in topology.link_ids] + return link_uuids + +def get_links_in_topology( + context_client : ContextClient, context_id : ContextId, topology_uuid : str +) -> List[Link]: + link_uuids = get_uuids_of_links_in_topology(context_client, context_id, topology_uuid) + + all_links = context_client.ListLinks(Empty()) + links_in_topology = list() + for link in all_links.links: + link_uuid = link.link_id.link_uuid.uuid + if link_uuid not in link_uuids: continue + links_in_topology.append(link) + + return links_in_topology diff --git 
a/src/common/tools/context_queries/Service.py b/src/common/tools/context_queries/Service.py new file mode 100644 index 0000000000000000000000000000000000000000..15b201e731760068457683d9e30f79ab12d231d7 --- /dev/null +++ b/src/common/tools/context_queries/Service.py @@ -0,0 +1,39 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging +from typing import Optional +from common.Constants import DEFAULT_CONTEXT_UUID +from common.proto.context_pb2 import Service, ServiceId +from context.client.ContextClient import ContextClient + +LOGGER = logging.getLogger(__name__) + +def get_service( + context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, + rw_copy : bool = False + ) -> Optional[Service]: + try: + # pylint: disable=no-member + service_id = ServiceId() + service_id.context_id.context_uuid.uuid = context_uuid + service_id.service_uuid.uuid = service_uuid + ro_service = context_client.GetService(service_id) + if not rw_copy: return ro_service + rw_service = Service() + rw_service.CopyFrom(ro_service) + return rw_service + except grpc.RpcError: + #LOGGER.exception('Unable to get service({:s} / {:s})'.format(str(context_uuid), str(service_uuid))) + return None diff --git a/src/common/tools/context_queries/Slice.py b/src/common/tools/context_queries/Slice.py new file mode 100644 index 
0000000000000000000000000000000000000000..9f884aa94990c28ad786b3243aed948ddc7f9f34 --- /dev/null +++ b/src/common/tools/context_queries/Slice.py @@ -0,0 +1,39 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging +from typing import Optional +from common.Constants import DEFAULT_CONTEXT_UUID +from common.proto.context_pb2 import Slice, SliceId +from context.client.ContextClient import ContextClient + +LOGGER = logging.getLogger(__name__) + +def get_slice( + context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, + rw_copy : bool = False + ) -> Optional[Slice]: + try: + # pylint: disable=no-member + slice_id = SliceId() + slice_id.context_id.context_uuid.uuid = context_uuid + slice_id.slice_uuid.uuid = slice_uuid + ro_slice = context_client.GetSlice(slice_id) + if not rw_copy: return ro_slice + rw_slice = Slice() + rw_slice.CopyFrom(ro_slice) + return rw_slice + except grpc.RpcError: + #LOGGER.exception('Unable to get slice({:s} / {:s})'.format(str(context_uuid), str(slice_uuid))) + return None diff --git a/src/common/tools/context_queries/Topology.py b/src/common/tools/context_queries/Topology.py new file mode 100644 index 0000000000000000000000000000000000000000..3d2077e965efb3e78ad9febbe54b4f0aaea5aef6 --- /dev/null +++ b/src/common/tools/context_queries/Topology.py @@ -0,0 +1,63 @@ +# Copyright 2021-2023 H2020 TeraFlow 
(https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging +from typing import List, Optional +from common.Constants import DEFAULT_CONTEXT_UUID +from common.proto.context_pb2 import ContextId, Topology, TopologyId +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology +from context.client.ContextClient import ContextClient + +LOGGER = logging.getLogger(__name__) + +def create_topology( + context_client : ContextClient, context_uuid : str, topology_uuid : str +) -> None: + context_id = ContextId(**json_context_id(context_uuid)) + existing_topology_ids = context_client.ListTopologyIds(context_id) + existing_topology_uuids = {topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids} + if topology_uuid in existing_topology_uuids: return + context_client.SetTopology(Topology(**json_topology(topology_uuid, context_id=context_id))) + +def create_missing_topologies( + context_client : ContextClient, context_id : ContextId, topology_uuids : List[str] +) -> None: + # Find existing topologies within own context + existing_topology_ids = context_client.ListTopologyIds(context_id) + existing_topology_uuids = {topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids} + + # Create topologies within provided context + for topology_uuid in topology_uuids: + if topology_uuid in existing_topology_uuids: continue + 
grpc_topology = Topology(**json_topology(topology_uuid, context_id=context_id)) + context_client.SetTopology(grpc_topology) + +def get_topology( + context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, + rw_copy : bool = False + ) -> Optional[Topology]: + try: + # pylint: disable=no-member + topology_id = TopologyId() + topology_id.context_id.context_uuid.uuid = context_uuid + topology_id.topology_uuid.uuid = topology_uuid + ro_topology = context_client.GetTopology(topology_id) + if not rw_copy: return ro_topology + rw_topology = Topology() + rw_topology.CopyFrom(ro_topology) + return rw_topology + except grpc.RpcError: + #LOGGER.exception('Unable to get topology({:s} / {:s})'.format(str(context_uuid), str(topology_uuid))) + return None diff --git a/src/tests/netx22-p4/tests/__init__.py b/src/common/tools/context_queries/__init__.py similarity index 100% rename from src/tests/netx22-p4/tests/__init__.py rename to src/common/tools/context_queries/__init__.py diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py new file mode 100644 index 0000000000000000000000000000000000000000..f14e2caf6065996ea6223449f309e03d141b5954 --- /dev/null +++ b/src/common/tools/descriptor/Loader.py @@ -0,0 +1,254 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# SDN controller descriptor loader + +# Usage example (WebUI): +# descriptors = json.loads(descriptors_data_from_client) +# descriptor_loader = DescriptorLoader(descriptors) +# results = descriptor_loader.process() +# for message,level in compose_notifications(results): +# flash(message, level) + +# Usage example (pytest): +# with open('path/to/descriptor.json', 'r', encoding='UTF-8') as f: +# descriptors = json.loads(f.read()) +# descriptor_loader = DescriptorLoader( +# descriptors, context_client=..., device_client=..., service_client=..., slice_client=...) +# results = descriptor_loader.process() +# loggers = {'success': LOGGER.info, 'danger': LOGGER.error, 'error': LOGGER.error} +# for message,level in compose_notifications(results): +# loggers.get(level)(message) + +import json +from typing import Dict, List, Optional, Tuple, Union +from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Slice, Topology +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from slice.client.SliceClient import SliceClient +from .Tools import ( + format_device_custom_config_rules, format_service_custom_config_rules, format_slice_custom_config_rules, + get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices, + get_descriptors_add_topologies, split_devices_by_rules) + +ENTITY_TO_TEXT = { + # name => singular, plural + 'context' : ('Context', 'Contexts' ), + 'topology' : ('Topology', 'Topologies' ), + 'device' : ('Device', 'Devices' ), + 'link' : ('Link', 'Links' ), + 'service' : ('Service', 'Services' ), + 'slice' : ('Slice', 'Slices' ), + 'connection': ('Connection', 'Connections'), +} + +ACTION_TO_TEXT = { + # action => infinitive, past + 'add' : ('Add', 'Added'), + 'update' : ('Update', 'Updated'), + 'config' : ('Configure', 'Configured'), +} + +TypeResults = List[Tuple[str, str, int, List[str]]] # 
entity_name, action, num_ok, list[error] +TypeNotification = Tuple[str, str] # message, level +TypeNotificationList = List[TypeNotification] + +def compose_notifications(results : TypeResults) -> TypeNotificationList: + notifications = [] + for entity_name, action_name, num_ok, error_list in results: + entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name] + action_infinitive, action_past = ACTION_TO_TEXT[action_name] + num_err = len(error_list) + for error in error_list: + notifications.append((f'Unable to {action_infinitive} {entity_name_singluar} {error}', 'error')) + if num_ok : notifications.append((f'{str(num_ok)} {entity_name_plural} {action_past}', 'success')) + if num_err: notifications.append((f'{str(num_err)} {entity_name_plural} failed', 'danger')) + return notifications + +class DescriptorLoader: + def __init__( + self, descriptors : Union[str, Dict], + context_client : Optional[ContextClient] = None, device_client : Optional[DeviceClient] = None, + service_client : Optional[ServiceClient] = None, slice_client : Optional[SliceClient] = None + ) -> None: + self.__descriptors = json.loads(descriptors) if isinstance(descriptors, str) else descriptors + self.__dummy_mode = self.__descriptors.get('dummy_mode' , False) + self.__contexts = self.__descriptors.get('contexts' , []) + self.__topologies = self.__descriptors.get('topologies' , []) + self.__devices = self.__descriptors.get('devices' , []) + self.__links = self.__descriptors.get('links' , []) + self.__services = self.__descriptors.get('services' , []) + self.__slices = self.__descriptors.get('slices' , []) + self.__connections = self.__descriptors.get('connections', []) + + self.__contexts_add = None + self.__topologies_add = None + self.__devices_add = None + self.__devices_config = None + self.__services_add = None + self.__slices_add = None + + self.__ctx_cli = ContextClient() if context_client is None else context_client + self.__dev_cli = DeviceClient() if device_client is None 
else device_client + self.__svc_cli = ServiceClient() if service_client is None else service_client + self.__slc_cli = SliceClient() if slice_client is None else slice_client + + self.__results : TypeResults = list() + + @property + def contexts(self) -> List[Dict]: return self.__contexts + + @property + def num_contexts(self) -> int: return len(self.__contexts) + + @property + def topologies(self) -> Dict[str, List[Dict]]: + _topologies = {} + for topology in self.__topologies: + context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid'] + _topologies.setdefault(context_uuid, []).append(topology) + return _topologies + + @property + def num_topologies(self) -> Dict[str, int]: + _num_topologies = {} + for topology in self.__topologies: + context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid'] + _num_topologies[context_uuid] = _num_topologies.get(context_uuid, 0) + 1 + return _num_topologies + + @property + def devices(self) -> List[Dict]: return self.__devices + + @property + def num_devices(self) -> int: return len(self.__devices) + + @property + def links(self) -> List[Dict]: return self.__links + + @property + def num_links(self) -> int: return len(self.__links) + + @property + def services(self) -> Dict[str, List[Dict]]: + _services = {} + for service in self.__services: + context_uuid = service['service_id']['context_id']['context_uuid']['uuid'] + _services.setdefault(context_uuid, []).append(service) + return _services + + @property + def num_services(self) -> Dict[str, int]: + _num_services = {} + for service in self.__services: + context_uuid = service['service_id']['context_id']['context_uuid']['uuid'] + _num_services[context_uuid] = _num_services.get(context_uuid, 0) + 1 + return _num_services + + @property + def slices(self) -> Dict[str, List[Dict]]: + _slices = {} + for slice_ in self.__slices: + context_uuid = slice_['slice_id']['context_id']['context_uuid']['uuid'] + _slices.setdefault(context_uuid, 
[]).append(slice_) + return _slices + + @property + def num_slices(self) -> Dict[str, int]: + _num_slices = {} + for slice_ in self.__slices: + context_uuid = slice_['slice_id']['context_id']['context_uuid']['uuid'] + _num_slices[context_uuid] = _num_slices.get(context_uuid, 0) + 1 + return _num_slices + + @property + def connections(self) -> List[Dict]: return self.__connections + + @property + def num_connections(self) -> int: return len(self.__connections) + + def process(self) -> TypeResults: + # Format CustomConfigRules in Devices, Services and Slices provided in JSON format + self.__devices = [format_device_custom_config_rules (device ) for device in self.__devices ] + self.__services = [format_service_custom_config_rules(service) for service in self.__services] + self.__slices = [format_slice_custom_config_rules (slice_ ) for slice_ in self.__slices ] + + # Context and Topology require to create the entity first, and add devices, links, services, + # slices, etc. in a second stage. + self.__contexts_add = get_descriptors_add_contexts(self.__contexts) + self.__topologies_add = get_descriptors_add_topologies(self.__topologies) + + if self.__dummy_mode: + self._dummy_mode() + else: + self._normal_mode() + + return self.__results + + def _dummy_mode(self) -> None: + # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks. 
+ self.__ctx_cli.connect() + self._process_descr('context', 'add', self.__ctx_cli.SetContext, Context, self.__contexts_add ) + self._process_descr('topology', 'add', self.__ctx_cli.SetTopology, Topology, self.__topologies_add) + self._process_descr('device', 'add', self.__ctx_cli.SetDevice, Device, self.__devices ) + self._process_descr('link', 'add', self.__ctx_cli.SetLink, Link, self.__links ) + self._process_descr('service', 'add', self.__ctx_cli.SetService, Service, self.__services ) + self._process_descr('slice', 'add', self.__ctx_cli.SetSlice, Slice, self.__slices ) + self._process_descr('connection', 'add', self.__ctx_cli.SetConnection, Connection, self.__connections ) + self._process_descr('context', 'update', self.__ctx_cli.SetContext, Context, self.__contexts ) + self._process_descr('topology', 'update', self.__ctx_cli.SetTopology, Topology, self.__topologies ) + #self.__ctx_cli.close() + + def _normal_mode(self) -> None: + # Normal mode: follows the automated workflows in the different components + assert len(self.__connections) == 0, 'in normal mode, connections should not be set' + + # Device, Service and Slice require to first create the entity and the configure it + self.__devices_add, self.__devices_config = split_devices_by_rules(self.__devices) + self.__services_add = get_descriptors_add_services(self.__services) + self.__slices_add = get_descriptors_add_slices(self.__slices) + + self.__ctx_cli.connect() + self.__dev_cli.connect() + self.__svc_cli.connect() + self.__slc_cli.connect() + + self._process_descr('context', 'add', self.__ctx_cli.SetContext, Context, self.__contexts_add ) + self._process_descr('topology', 'add', self.__ctx_cli.SetTopology, Topology, self.__topologies_add) + self._process_descr('device', 'add', self.__dev_cli.AddDevice, Device, self.__devices_add ) + self._process_descr('device', 'config', self.__dev_cli.ConfigureDevice, Device, self.__devices_config) + self._process_descr('link', 'add', self.__ctx_cli.SetLink, Link, 
self.__links ) + self._process_descr('service', 'add', self.__svc_cli.CreateService, Service, self.__services_add ) + self._process_descr('service', 'update', self.__svc_cli.UpdateService, Service, self.__services ) + self._process_descr('slice', 'add', self.__slc_cli.CreateSlice, Slice, self.__slices_add ) + self._process_descr('slice', 'update', self.__slc_cli.UpdateSlice, Slice, self.__slices ) + self._process_descr('context', 'update', self.__ctx_cli.SetContext, Context, self.__contexts ) + self._process_descr('topology', 'update', self.__ctx_cli.SetTopology, Topology, self.__topologies ) + + #self.__slc_cli.close() + #self.__svc_cli.close() + #self.__dev_cli.close() + #self.__ctx_cli.close() + + def _process_descr(self, entity_name, action_name, grpc_method, grpc_class, entities) -> None: + num_ok, num_err, error_list = 0, 0, [] + for entity in entities: + try: + grpc_method(grpc_class(**entity)) + num_ok += 1 + except Exception as e: # pylint: disable=broad-except + error_list.append(f'{str(entity)}: {str(e)}') + num_err += 1 + self.__results.append((entity_name, action_name, num_ok, error_list)) diff --git a/src/webui/service/main/DescriptorTools.py b/src/common/tools/descriptor/Tools.py similarity index 79% rename from src/webui/service/main/DescriptorTools.py rename to src/common/tools/descriptor/Tools.py index 094be2f7d0cfd69ddb5cddc2238e8cec64c75daa..909cec9d97b5baa2f7b0198091c3921a71c9b1f7 100644 --- a/src/webui/service/main/DescriptorTools.py +++ b/src/common/tools/descriptor/Tools.py @@ -41,8 +41,8 @@ def get_descriptors_add_services(services : List[Dict]) -> List[Dict]: def get_descriptors_add_slices(slices : List[Dict]) -> List[Dict]: slices_add = [] - for slice in slices: - slice_copy = copy.deepcopy(slice) + for slice_ in slices: + slice_copy = copy.deepcopy(slice_) slice_copy['slice_endpoint_ids'] = [] slice_copy['slice_constraints'] = [] slice_copy['slice_config'] = {'config_rules': []} @@ -59,6 +59,24 @@ def format_custom_config_rules(config_rules :
List[Dict]) -> List[Dict]: config_rule['custom']['resource_value'] = custom_resource_value return config_rules +def format_device_custom_config_rules(device : Dict) -> Dict: + config_rules = device.get('device_config', {}).get('config_rules', []) + config_rules = format_custom_config_rules(config_rules) + device['device_config']['config_rules'] = config_rules + return device + +def format_service_custom_config_rules(service : Dict) -> Dict: + config_rules = service.get('service_config', {}).get('config_rules', []) + config_rules = format_custom_config_rules(config_rules) + service['service_config']['config_rules'] = config_rules + return service + +def format_slice_custom_config_rules(slice_ : Dict) -> Dict: + config_rules = slice_.get('slice_config', {}).get('config_rules', []) + config_rules = format_custom_config_rules(config_rules) + slice_['slice_config']['config_rules'] = config_rules + return slice_ + def split_devices_by_rules(devices : List[Dict]) -> Tuple[List[Dict], List[Dict]]: devices_add = [] devices_config = [] diff --git a/src/common/tools/descriptor/__init__.py b/src/common/tools/descriptor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/common/tools/descriptor/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ diff --git a/src/tests/ofc22/tests/BuildDescriptors.py b/src/common/tools/descriptor/old/BuildDescriptors.py similarity index 100% rename from src/tests/ofc22/tests/BuildDescriptors.py rename to src/common/tools/descriptor/old/BuildDescriptors.py diff --git a/src/tests/ofc22/tests/LoadDescriptors.py b/src/common/tools/descriptor/old/LoadDescriptors.py similarity index 100% rename from src/tests/ofc22/tests/LoadDescriptors.py rename to src/common/tools/descriptor/old/LoadDescriptors.py index 33bc699af933601e4c6d4b8dbc7b0c51206241ef..f0b19196afbcd67c1f20263791d20820489b9cf5 100644 --- a/src/tests/ofc22/tests/LoadDescriptors.py +++ b/src/common/tools/descriptor/old/LoadDescriptors.py @@ -14,8 +14,8 @@ import json, logging, sys from common.Settings import get_setting -from context.client.ContextClient import ContextClient from common.proto.context_pb2 import Context, Device, Link, Topology +from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient LOGGER = logging.getLogger(__name__) diff --git a/src/common/tools/grpc/Constraints.py b/src/common/tools/grpc/Constraints.py index a9dd4f40cbd823752b8cc09936ac48ebe32ec1a5..aa95767ab2807e4ac7ac331c47622a8ece0e88ff 100644 --- a/src/common/tools/grpc/Constraints.py +++ b/src/common/tools/grpc/Constraints.py @@ -21,7 +21,33 @@ from typing import Any, Dict, Optional, Tuple from common.proto.context_pb2 import Constraint, EndPointId from common.tools.grpc.Tools import grpc_message_to_json_string -def update_constraint_custom(constraints, constraint_type : str, fields : Dict[str, Tuple[Any, bool]]) -> Constraint: +def update_constraint_custom_scalar( + constraints, constraint_type : str, value : Any, raise_if_differs : bool = False +) -> Constraint: + + for constraint in constraints: + if constraint.WhichOneof('constraint') != 'custom': continue + if constraint.custom.constraint_type != constraint_type: continue + json_constraint_value = 
json.loads(constraint.custom.constraint_value) + break # found, end loop + else: + # not found, add it + constraint = constraints.add() # pylint: disable=no-member + constraint.custom.constraint_type = constraint_type + json_constraint_value = None + + if (json_constraint_value is None) or not raise_if_differs: + # missing or raise_if_differs=False, add/update it + json_constraint_value = value + elif json_constraint_value != value: + # exists, differs, and raise_if_differs=True + msg = 'Specified value({:s}) differs existing value({:s})' + raise Exception(msg.format(str(value), str(json_constraint_value))) + + constraint.custom.constraint_value = json.dumps(json_constraint_value, sort_keys=True) + return constraint + +def update_constraint_custom_dict(constraints, constraint_type : str, fields : Dict[str, Tuple[Any, bool]]) -> Constraint: # fields: Dict[field_name : str, Tuple[field_value : Any, raise_if_differs : bool]] for constraint in constraints: @@ -45,6 +71,7 @@ def update_constraint_custom(constraints, constraint_type : str, fields : Dict[s raise Exception(msg.format(str(field_name), str(field_value), str(json_constraint_value[field_name]))) constraint.custom.constraint_value = json.dumps(json_constraint_value, sort_keys=True) + return constraint def update_constraint_endpoint_location( constraints, endpoint_id : EndPointId, @@ -129,10 +156,18 @@ def copy_constraints(source_constraints, target_constraints): if constraint_kind == 'custom': custom = source_constraint.custom constraint_type = custom.constraint_type - constraint_value = json.loads(custom.constraint_value) - raise_if_differs = True - fields = {name:(value, raise_if_differs) for name,value in constraint_value.items()} - update_constraint_custom(target_constraints, constraint_type, fields) + try: + constraint_value = json.loads(custom.constraint_value) + except: # pylint: disable=bare-except + constraint_value = custom.constraint_value + if isinstance(constraint_value, dict): + raise_if_differs = 
True + fields = {name:(value, raise_if_differs) for name,value in constraint_value.items()} + update_constraint_custom_dict(target_constraints, constraint_type, fields) + else: + raise_if_differs = True + update_constraint_custom_scalar( + target_constraints, constraint_type, constraint_value, raise_if_differs=raise_if_differs) elif constraint_kind == 'endpoint_location': endpoint_id = source_constraint.endpoint_location.endpoint_id diff --git a/src/common/tools/object_factory/Device.py b/src/common/tools/object_factory/Device.py index 4a590134dd7b455c92b62fc5e4aa9fece0f874b4..666d65f1e739b4cca8b665846f1775dcd6130e1c 100644 --- a/src/common/tools/object_factory/Device.py +++ b/src/common/tools/object_factory/Device.py @@ -33,8 +33,11 @@ DEVICE_PR_DRIVERS = [DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG] DEVICE_TAPI_TYPE = DeviceTypeEnum.OPEN_LINE_SYSTEM.value DEVICE_TAPI_DRIVERS = [DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API] +DEVICE_XR_CONSTELLATION_TYPE = DeviceTypeEnum.XR_CONSTELLATION.value +DEVICE_XR_CONSTELLATION_DRIVERS = [DeviceDriverEnum.DEVICEDRIVER_XR] + # check which enum type and value assign to microwave device -DEVICE_MICROWAVE_TYPE = DeviceTypeEnum.MICROVAWE_RADIO_SYSTEM.value +DEVICE_MICROWAVE_TYPE = DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value DEVICE_MICROWAVE_DRIVERS = [DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY] DEVICE_P4_TYPE = DeviceTypeEnum.P4_SWITCH.value @@ -94,6 +97,14 @@ def json_device_tapi_disabled( return json_device( device_uuid, DEVICE_TAPI_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, drivers=drivers) +def json_device_xr_constellation_disabled( + device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], + drivers : List[Dict] = DEVICE_XR_CONSTELLATION_DRIVERS + ): + return json_device( + device_uuid, DEVICE_XR_CONSTELLATION_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, + drivers=drivers) + def json_device_microwave_disabled( device_uuid : str, endpoints : 
List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_MICROWAVE_DRIVERS diff --git a/src/common/tools/object_factory/PolicyRule.py b/src/common/tools/object_factory/PolicyRule.py new file mode 100644 index 0000000000000000000000000000000000000000..8702f931dfffef175ce6c25de24a10de8286effc --- /dev/null +++ b/src/common/tools/object_factory/PolicyRule.py @@ -0,0 +1,48 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from typing import Dict, List, Optional +from common.proto.policy_condition_pb2 import BooleanOperator + +LOGGER = logging.getLogger(__name__) + +def json_policy_rule_id(policy_rule_uuid : str) -> Dict: + return {'uuid': {'uuid': policy_rule_uuid}} + +def json_policy_rule( + policy_rule_uuid : str, policy_priority : int = 1, + boolean_operator : BooleanOperator = BooleanOperator.POLICYRULE_CONDITION_BOOLEAN_AND, + condition_list : List[Dict] = [], action_list : List[Dict] = [], + service_id : Optional[Dict] = None, device_id_list : List[Dict] = [] +) -> Dict: + basic = { + 'policyRuleId': json_policy_rule_id(policy_rule_uuid), + 'priority': policy_priority, + 'conditionList': condition_list, + 'booleanOperator': boolean_operator, + 'actionList': action_list, + } + + result = {} + if service_id is not None: + policy_rule_type = 'service' + result[policy_rule_type] = {'policyRuleBasic': basic} + result[policy_rule_type]['serviceId'] = service_id + else: + policy_rule_type = 'device' + result[policy_rule_type] = {'policyRuleBasic': basic} + + result[policy_rule_type]['deviceList'] = device_id_list + return result diff --git a/src/common/tools/object_factory/Slice.py b/src/common/tools/object_factory/Slice.py new file mode 100644 index 0000000000000000000000000000000000000000..6ab666aa6ed379eb0b8948b1178aa13069d70bf4 --- /dev/null +++ b/src/common/tools/object_factory/Slice.py @@ -0,0 +1,48 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +from typing import Dict, List, Optional +from common.proto.context_pb2 import SliceStatusEnum + +def get_slice_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str: + return 'slc:{:s}/{:s}=={:s}/{:s}'.format( + a_endpoint_id['device_id']['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'], + z_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid']) + +def json_slice_id(slice_uuid : str, context_id : Optional[Dict] = None) -> Dict: + result = {'slice_uuid': {'uuid': slice_uuid}} + if context_id is not None: result['context_id'] = copy.deepcopy(context_id) + return result + +def json_slice_owner(owner_uuid : str, owner_string : str) -> Dict: + return {'owner_uuid': {'uuid': owner_uuid}, 'owner_string': owner_string} + +def json_slice( + slice_uuid : str, context_id : Optional[Dict] = None, + status : SliceStatusEnum = SliceStatusEnum.SLICESTATUS_PLANNED, endpoint_ids : List[Dict] = [], + constraints : List[Dict] = [], config_rules : List[Dict] = [], service_ids : List[Dict] = [], + subslice_ids : List[Dict] = [], owner : Optional[Dict] = None): + + result = { + 'slice_id' : json_slice_id(slice_uuid, context_id=context_id), + 'slice_status' : {'slice_status': status}, + 'slice_endpoint_ids': copy.deepcopy(endpoint_ids), + 'slice_constraints' : copy.deepcopy(constraints), + 'slice_config' : {'config_rules': copy.deepcopy(config_rules)}, + 'slice_service_ids' : copy.deepcopy(service_ids), + 'slice_subslice_ids': copy.deepcopy(subslice_ids), + } + if owner is not None: result['slice_owner'] = owner + return result diff --git a/src/common/type_checkers/Assertions.py b/src/common/type_checkers/Assertions.py index 20ffa9ad619a40d6da4f3830c202d1a545545b51..aa9ede33303fca73d033ee2a40dec587882a5bb1 100644 --- a/src/common/type_checkers/Assertions.py +++ b/src/common/type_checkers/Assertions.py @@ -32,6 +32,7 @@ 
def validate_device_driver_enum(message): 'DEVICEDRIVER_P4', 'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY', 'DEVICEDRIVER_ONF_TR_352', + 'DEVICEDRIVER_XR', ] def validate_device_operational_status_enum(message): diff --git a/src/compute/Dockerfile b/src/compute/Dockerfile index bdc07584c5bd8f08fdef6f997cc18dcfd9eeb3e6..90a69c0f503724fd1098608d85ad5eca874e3f8b 100644 --- a/src/compute/Dockerfile +++ b/src/compute/Dockerfile @@ -66,6 +66,8 @@ COPY src/compute/. compute/ COPY src/context/. context/ COPY src/service/. service/ COPY src/slice/. slice/ +RUN mkdir -p /var/teraflow/tests/tools +COPY src/tests/tools/mock_osm/. tests/tools/mock_osm/ # Start the service ENTRYPOINT ["python", "-m", "compute.service"] diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py index 7e050289f19b93dc710185c2b29b326bbfd156d2..e3d12088147a59c3fd9e0179d3a3d957483fcc22 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py @@ -17,10 +17,10 @@ from flask import request from flask.json import jsonify from flask_restful import Resource from common.proto.context_pb2 import SliceStatusEnum +from common.tools.context_queries.Slice import get_slice from context.client.ContextClient import ContextClient from slice.client.SliceClient import SliceClient from .tools.Authentication import HTTP_AUTH -from .tools.ContextMethods import get_slice from .tools.HttpStatusCodes import HTTP_GATEWAYTIMEOUT, HTTP_NOCONTENT, HTTP_OK, HTTP_SERVERERROR LOGGER = logging.getLogger(__name__) @@ -34,7 +34,7 @@ class L2VPN_Service(Resource): try: context_client = ContextClient() - target = get_slice(context_client, vpn_id) + target = get_slice(context_client, vpn_id, rw_copy=True) if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) diff --git 
a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 3cc823a2aa7a06de6cb591ef6d668ba7eeef5cbd..819d8995da6ffc3a7913c8781e4021ce83665e29 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -20,9 +20,10 @@ from flask.wrappers import Response from flask_restful import Resource from werkzeug.exceptions import UnsupportedMediaType from common.proto.context_pb2 import Slice +from common.tools.context_queries.Slice import get_slice from common.tools.grpc.ConfigRules import update_config_rule_custom from common.tools.grpc.Constraints import ( - update_constraint_custom, update_constraint_endpoint_location, update_constraint_endpoint_priority, + update_constraint_custom_dict, update_constraint_endpoint_location, update_constraint_endpoint_priority, update_constraint_sla_availability) from common.tools.grpc.EndPointIds import update_endpoint_ids from common.tools.grpc.Tools import grpc_message_to_json_string @@ -30,7 +31,6 @@ from context.client.ContextClient import ContextClient from slice.client.SliceClient import SliceClient from .schemas.site_network_access import SCHEMA_SITE_NETWORK_ACCESS from .tools.Authentication import HTTP_AUTH -from .tools.ContextMethods import get_slice from .tools.HttpStatusCodes import HTTP_NOCONTENT, HTTP_SERVERERROR from .tools.Validator import validate_message from .Constants import ( @@ -69,7 +69,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s address_ip, address_prefix, remote_router, circuit_id ) = mapping - target = get_slice(context_client, vpn_id) + target = get_slice(context_client, vpn_id, rw_copy=True) if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) endpoint_ids = target.slice_endpoint_ids # 
pylint: disable=no-member @@ -99,7 +99,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s update_config_rule_custom(config_rules, endpoint_settings_key, field_updates) if len(diversity_constraints) > 0: - update_constraint_custom(constraints, 'diversity', diversity_constraints) + update_constraint_custom_dict(constraints, 'diversity', diversity_constraints) update_constraint_endpoint_location(constraints, endpoint_id, region=site_id) if access_priority is not None: update_constraint_endpoint_priority(constraints, endpoint_id, access_priority) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py deleted file mode 100644 index ac9e6fe4a5c138d00bc80fd953de2cc21d4677b5..0000000000000000000000000000000000000000 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py +++ /dev/null @@ -1,39 +0,0 @@ -import grpc, logging -from typing import Optional -from common.Constants import DEFAULT_CONTEXT_UUID -from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId -from context.client.ContextClient import ContextClient - -LOGGER = logging.getLogger(__name__) - -def get_service( - context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID - ) -> Optional[Service]: - try: - # pylint: disable=no-member - service_id = ServiceId() - service_id.context_id.context_uuid.uuid = context_uuid - service_id.service_uuid.uuid = service_uuid - service_readonly = context_client.GetService(service_id) - service = Service() - service.CopyFrom(service_readonly) - return service - except grpc.RpcError: - #LOGGER.exception('Unable to get service({:s} / {:s})'.format(str(context_uuid), str(service_uuid))) - return None - -def get_slice( - context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID - ) -> Optional[Slice]: - try: - # pylint: 
disable=no-member - slice_id = SliceId() - slice_id.context_id.context_uuid.uuid = context_uuid - slice_id.slice_uuid.uuid = slice_uuid - slice_readonly = context_client.GetSlice(slice_id) - slice_ = Slice() - slice_.CopyFrom(slice_readonly) - return slice_ - except grpc.RpcError: - #LOGGER.exception('Unable to get slice({:s} / {:s})'.format(str(context_uuid), str(slice_uuid))) - return None diff --git a/src/compute/tests/PrepareTestScenario.py b/src/compute/tests/PrepareTestScenario.py index d534a4a28280c80964096a9cb7291c498ebe6b93..06fb34f9ee7508f4bd6fa769da78c50eb78c3bb8 100644 --- a/src/compute/tests/PrepareTestScenario.py +++ b/src/compute/tests/PrepareTestScenario.py @@ -19,7 +19,7 @@ from common.Settings import ( from compute.service.rest_server.RestServer import RestServer from compute.service.rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn from compute.tests.MockService_Dependencies import MockService_Dependencies -from .mock_osm.MockOSM import MockOSM +from tests.tools.mock_osm.MockOSM import MockOSM from .Constants import WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD LOCAL_HOST = '127.0.0.1' diff --git a/src/compute/tests/test_unitary.py b/src/compute/tests/test_unitary.py index 05c45c1b3554d21084a4a20cac6856b049fe7ca3..acef6d4a68cb1e89df2fa567d437412c8805b35f 100644 --- a/src/compute/tests/test_unitary.py +++ b/src/compute/tests/test_unitary.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from .mock_osm.MockOSM import MockOSM +from tests.tools.mock_osm.MockOSM import MockOSM from .Constants import SERVICE_CONNECTION_POINTS_1, SERVICE_CONNECTION_POINTS_2, SERVICE_TYPE from .PrepareTestScenario import ( # pylint: disable=unused-import # be careful, order of symbols is important here! 
diff --git a/src/context/client/ContextClient.py b/src/context/client/ContextClient.py index da907341f799def94694817242c106a913e03327..f91f36cf5bf73669e4010c8c65d9c4cabd9c6e2e 100644 --- a/src/context/client/ContextClient.py +++ b/src/context/client/ContextClient.py @@ -28,6 +28,8 @@ from common.proto.context_pb2 import ( Slice, SliceEvent, SliceId, SliceIdList, SliceList, Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList) from common.proto.context_pb2_grpc import ContextServiceStub +from common.proto.context_policy_pb2_grpc import ContextPolicyServiceStub +from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule LOGGER = logging.getLogger(__name__) MAX_RETRIES = 15 @@ -42,17 +44,20 @@ class ContextClient: LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint))) self.channel = None self.stub = None + self.policy_stub = None self.connect() LOGGER.debug('Channel created') def connect(self): self.channel = grpc.insecure_channel(self.endpoint) self.stub = ContextServiceStub(self.channel) + self.policy_stub = ContextPolicyServiceStub(self.channel) def close(self): if self.channel is not None: self.channel.close() self.channel = None self.stub = None + self.policy_stub = None @RETRY_DECORATOR def ListContextIds(self, request: Empty) -> ContextIdList: @@ -361,3 +366,38 @@ class ContextClient: response = self.stub.GetConnectionEvents(request) LOGGER.debug('GetConnectionEvents result: {:s}'.format(grpc_message_to_json_string(response))) return response + + @RETRY_DECORATOR + def ListPolicyRuleIds(self, request: Empty) -> PolicyRuleIdList: + LOGGER.debug('ListPolicyRuleIds request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.policy_stub.ListPolicyRuleIds(request) + LOGGER.debug('ListPolicyRuleIds result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def ListPolicyRules(self, request: Empty) -> PolicyRuleList: + 
LOGGER.debug('ListPolicyRules request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.policy_stub.ListPolicyRules(request) + LOGGER.debug('ListPolicyRules result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def GetPolicyRule(self, request: PolicyRuleId) -> PolicyRule: + LOGGER.info('GetPolicyRule request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.policy_stub.GetPolicyRule(request) + LOGGER.info('GetPolicyRule result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def SetPolicyRule(self, request: PolicyRule) -> PolicyRuleId: + LOGGER.debug('SetPolicyRule request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.policy_stub.SetPolicyRule(request) + LOGGER.debug('SetPolicyRule result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RemovePolicyRule(self, request: PolicyRuleId) -> Empty: + LOGGER.debug('RemovePolicyRule request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.policy_stub.RemovePolicyRule(request) + LOGGER.debug('RemovePolicyRule result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/context/client/EventsCollector.py b/src/context/client/EventsCollector.py index 9715098bd3cd979d78a83b4839e40613d3997d1e..f5fc3fbc735c2f62b39223b9ed20aa3730ecd11d 100644 --- a/src/context/client/EventsCollector.py +++ b/src/context/client/EventsCollector.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import grpc, logging, queue, threading +from typing import Callable +import grpc, logging, queue, threading, time from common.proto.context_pb2 import Empty from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient @@ -20,6 +21,41 @@ from context.client.ContextClient import ContextClient LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) +class _Collector(threading.Thread): + def __init__( + self, subscription_func : Callable, events_queue = queue.Queue, + terminate = threading.Event, log_events_received: bool = False + ) -> None: + super().__init__(daemon=False) + self._subscription_func = subscription_func + self._events_queue = events_queue + self._terminate = terminate + self._log_events_received = log_events_received + self._stream = None + + def cancel(self) -> None: + if self._stream is None: return + self._stream.cancel() + + def run(self) -> None: + while not self._terminate.is_set(): + self._stream = self._subscription_func() + try: + for event in self._stream: + if self._log_events_received: + str_event = grpc_message_to_json_string(event) + LOGGER.info('[_collect] event: {:s}'.format(str_event)) + self._events_queue.put_nowait(event) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.UNAVAILABLE: + LOGGER.info('[_collect] UNAVAILABLE... 
retrying...') + time.sleep(0.5) + continue + elif e.code() == grpc.StatusCode.CANCELLED: + break + else: + raise # pragma: no cover + class EventsCollector: def __init__( self, context_client : ContextClient, @@ -31,60 +67,49 @@ class EventsCollector: activate_service_collector : bool = True, activate_slice_collector : bool = True, activate_connection_collector : bool = True, - ) -> None: self._events_queue = queue.Queue() + self._terminate = threading.Event() self._log_events_received = log_events_received - self._context_stream, self._context_thread = None, None - if activate_context_collector: - self._context_stream = context_client.GetContextEvents(Empty()) - self._context_thread = self._create_collector_thread(self._context_stream) - - self._topology_stream, self._topology_thread = None, None - if activate_topology_collector: - self._topology_stream = context_client.GetTopologyEvents(Empty()) - self._topology_thread = self._create_collector_thread(self._topology_stream) - - self._device_stream, self._device_thread = None, None - if activate_device_collector: - self._device_stream = context_client.GetDeviceEvents(Empty()) - self._device_thread = self._create_collector_thread(self._device_stream) - - self._link_stream, self._link_thread = None, None - if activate_link_collector: - self._link_stream = context_client.GetLinkEvents(Empty()) - self._link_thread = self._create_collector_thread(self._link_stream) - - self._service_stream, self._service_thread = None, None - if activate_service_collector: - self._service_stream = context_client.GetServiceEvents(Empty()) - self._service_thread = self._create_collector_thread(self._service_stream) - - self._slice_stream, self._slice_thread = None, None - if activate_slice_collector: - self._slice_stream = context_client.GetSliceEvents(Empty()) - self._slice_thread = self._create_collector_thread(self._slice_stream) - - self._connection_stream, self._connection_thread = None, None - if activate_connection_collector: - 
self._connection_stream = context_client.GetConnectionEvents(Empty()) - self._connection_thread = self._create_collector_thread(self._connection_stream) - - def _create_collector_thread(self, stream, as_daemon : bool = False): - return threading.Thread(target=self._collect, args=(stream,), daemon=as_daemon) - - def _collect(self, events_stream) -> None: - try: - for event in events_stream: - if self._log_events_received: - LOGGER.info('[_collect] event: {:s}'.format(grpc_message_to_json_string(event))) - self._events_queue.put_nowait(event) - except grpc.RpcError as e: - if e.code() != grpc.StatusCode.CANCELLED: # pylint: disable=no-member - raise # pragma: no cover + self._context_thread = _Collector( + lambda: context_client.GetContextEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_context_collector else None + + self._topology_thread = _Collector( + lambda: context_client.GetTopologyEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_topology_collector else None + + self._device_thread = _Collector( + lambda: context_client.GetDeviceEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_device_collector else None + + self._link_thread = _Collector( + lambda: context_client.GetLinkEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_link_collector else None + + self._service_thread = _Collector( + lambda: context_client.GetServiceEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_service_collector else None + + self._slice_thread = _Collector( + lambda: context_client.GetSliceEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_slice_collector else None + + self._connection_thread = _Collector( + lambda: context_client.GetConnectionEvents(Empty()), + self._events_queue, self._terminate, 
self._log_events_received + ) if activate_connection_collector else None def start(self): + self._terminate.clear() + if self._context_thread is not None: self._context_thread.start() if self._topology_thread is not None: self._topology_thread.start() if self._device_thread is not None: self._device_thread.start() @@ -102,25 +127,28 @@ class EventsCollector: def get_events(self, block : bool = True, timeout : float = 0.1, count : int = None): events = [] if count is None: - while True: + while not self._terminate.is_set(): event = self.get_event(block=block, timeout=timeout) if event is None: break events.append(event) else: for _ in range(count): + if self._terminate.is_set(): break event = self.get_event(block=block, timeout=timeout) if event is None: continue events.append(event) return sorted(events, key=lambda e: e.event.timestamp.timestamp) def stop(self): - if self._context_stream is not None: self._context_stream.cancel() - if self._topology_stream is not None: self._topology_stream.cancel() - if self._device_stream is not None: self._device_stream.cancel() - if self._link_stream is not None: self._link_stream.cancel() - if self._service_stream is not None: self._service_stream.cancel() - if self._slice_stream is not None: self._slice_stream.cancel() - if self._connection_stream is not None: self._connection_stream.cancel() + self._terminate.set() + + if self._context_thread is not None: self._context_thread.cancel() + if self._topology_thread is not None: self._topology_thread.cancel() + if self._device_thread is not None: self._device_thread.cancel() + if self._link_thread is not None: self._link_thread.cancel() + if self._service_thread is not None: self._service_thread.cancel() + if self._slice_thread is not None: self._slice_thread.cancel() + if self._connection_thread is not None: self._connection_thread.cancel() if self._context_thread is not None: self._context_thread.join() if self._topology_thread is not None: self._topology_thread.join() diff 
--git a/src/context/service/database/DeviceModel.py b/src/context/service/database/DeviceModel.py index 0d42326793b44473d8aef3da2c3e9ce8464bd1c4..0ffb97fee51da62802a1f7eb730380ba7a89dc0f 100644 --- a/src/context/service/database/DeviceModel.py +++ b/src/context/service/database/DeviceModel.py @@ -35,6 +35,7 @@ class ORM_DeviceDriverEnum(Enum): P4 = DeviceDriverEnum.DEVICEDRIVER_P4 IETF_NETWORK_TOPOLOGY = DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY ONF_TR_352 = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352 + XR = DeviceDriverEnum.DEVICEDRIVER_XR grpc_to_enum__device_driver = functools.partial( grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum) diff --git a/src/context/service/database/PolicyRuleModel.py b/src/context/service/database/PolicyRuleModel.py new file mode 100644 index 0000000000000000000000000000000000000000..7c84ea940482091a5667b2f11272748c7b444b6f --- /dev/null +++ b/src/context/service/database/PolicyRuleModel.py @@ -0,0 +1,32 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import json +from typing import Dict +from common.orm.fields.PrimaryKeyField import PrimaryKeyField +from common.orm.fields.StringField import StringField +from common.orm.model.Model import Model + +LOGGER = logging.getLogger(__name__) + +class PolicyRuleModel(Model): + pk = PrimaryKeyField() + value = StringField(required=True, allow_empty=False) + + def dump_id(self) -> Dict: + return {'uuid': {'uuid': self.pk}} + + def dump(self) -> Dict: + return json.loads(self.value) diff --git a/src/context/service/database/SliceModel.py b/src/context/service/database/SliceModel.py index bc00ada43758c9c5ffefbb88a87134aa46fbd73a..74bb60b401f656fdcfec8b0466019f87a8f1b41e 100644 --- a/src/context/service/database/SliceModel.py +++ b/src/context/service/database/SliceModel.py @@ -46,6 +46,8 @@ class SliceModel(Model): slice_constraints_fk = ForeignKeyField(ConstraintsModel) slice_status = EnumeratedField(ORM_SliceStatusEnum, required=True) slice_config_fk = ForeignKeyField(ConfigModel) + slice_owner_uuid = StringField(required=False, allow_empty=True) + slice_owner_string = StringField(required=False, allow_empty=True) def delete(self) -> None: # pylint: disable=import-outside-toplevel @@ -91,7 +93,11 @@ class SliceModel(Model): def dump_subslice_ids(self) -> List[Dict]: from .RelationModels import SliceSubSliceModel # pylint: disable=import-outside-toplevel db_subslices = get_related_objects(self, SliceSubSliceModel, 'sub_slice_fk') - return [db_subslice.dump_id() for db_subslice in sorted(db_subslices, key=operator.attrgetter('pk'))] + return [ + db_subslice.dump_id() + for db_subslice in sorted(db_subslices, key=operator.attrgetter('pk')) + if db_subslice.pk != self.pk # if I'm subslice of other slice, I will appear as subslice of myself + ] def dump( # pylint: disable=arguments-differ self, include_endpoint_ids=True, include_constraints=True, include_config_rules=True, @@ -106,4 +112,11 @@ class SliceModel(Model): if include_config_rules: 
result.setdefault('slice_config', {})['config_rules'] = self.dump_config() if include_service_ids: result['slice_service_ids'] = self.dump_service_ids() if include_subslice_ids: result['slice_subslice_ids'] = self.dump_subslice_ids() + + if len(self.slice_owner_uuid) > 0: + result.setdefault('slice_owner', {}).setdefault('owner_uuid', {})['uuid'] = self.slice_owner_uuid + + if len(self.slice_owner_string) > 0: + result.setdefault('slice_owner', {})['owner_string'] = self.slice_owner_string + return result diff --git a/src/context/service/grpc_server/ContextService.py b/src/context/service/grpc_server/ContextService.py index 1b54ec5400c93cba3882dccb197479b75bb699af..5d4dd8bb991ed64a970f9815bb302fd33d51cf34 100644 --- a/src/context/service/grpc_server/ContextService.py +++ b/src/context/service/grpc_server/ContextService.py @@ -17,6 +17,7 @@ from common.Settings import get_service_port_grpc from common.message_broker.MessageBroker import MessageBroker from common.orm.Database import Database from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server +from common.proto.context_policy_pb2_grpc import add_ContextPolicyServiceServicer_to_server from common.tools.service.GenericGrpcService import GenericGrpcService from .ContextServiceServicerImpl import ContextServiceServicerImpl @@ -31,3 +32,4 @@ class ContextService(GenericGrpcService): def install_servicers(self): add_ContextServiceServicer_to_server(self.context_servicer, self.server) + add_ContextPolicyServiceServicer_to_server(self.context_servicer, self.server) diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py index 88f7bd8af82009f1fc45bace87776d9cbc6d6543..f8dd188198606805e42449c3d690c20d3ad45f03 100644 --- a/src/context/service/grpc_server/ContextServiceServicerImpl.py +++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py @@ -28,13 +28,17 @@ from common.proto.context_pb2 import ( Service, 
ServiceEvent, ServiceId, ServiceIdList, ServiceList, Slice, SliceEvent, SliceId, SliceIdList, SliceList, Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList) +from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule) from common.proto.context_pb2_grpc import ContextServiceServicer +from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException +from common.tools.grpc.Tools import grpc_message_to_json from context.service.database.ConfigModel import update_config from context.service.database.ConnectionModel import ConnectionModel, set_path from context.service.database.ConstraintModel import set_constraints from context.service.database.ContextModel import ContextModel +from context.service.database.PolicyRuleModel import PolicyRuleModel from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types from context.service.database.Events import notify_event @@ -61,11 +65,12 @@ METHOD_NAMES = [ 'ListLinkIds', 'ListLinks', 'GetLink', 'SetLink', 'RemoveLink', 'GetLinkEvents', 'ListServiceIds', 'ListServices', 'GetService', 'SetService', 'RemoveService', 'GetServiceEvents', 'ListSliceIds', 'ListSlices', 'GetSlice', 'SetSlice', 'RemoveSlice', 'GetSliceEvents', + 'ListPolicyRuleIds', 'ListPolicyRules', 'GetPolicyRule', 'SetPolicyRule', 'RemovePolicyRule', 'UnsetService', 'UnsetSlice', ] METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) -class ContextServiceServicerImpl(ContextServiceServicer): +class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer): def __init__(self, database : Database, messagebroker : MessageBroker): LOGGER.debug('Creating Servicer...') self.lock 
= threading.Lock() @@ -606,6 +611,8 @@ class ContextServiceServicerImpl(ContextServiceServicer): 'slice_constraints_fk': db_constraints, 'slice_status' : grpc_to_enum__slice_status(request.slice_status.slice_status), 'slice_config_fk' : db_running_config, + 'slice_owner_uuid' : request.slice_owner.owner_uuid.uuid, + 'slice_owner_string' : request.slice_owner.owner_string, }) db_slice, updated = result @@ -622,7 +629,7 @@ class ContextServiceServicerImpl(ContextServiceServicer): db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key) - str_slice_endpoint_key = key_to_str([slice_uuid, str_endpoint_key], separator='--') + str_slice_endpoint_key = key_to_str([str_slice_key, str_endpoint_key], separator='--') result : Tuple[SliceEndPointModel, bool] = get_or_create_object( self.database, SliceEndPointModel, str_slice_endpoint_key, { 'slice_fk': db_slice, 'endpoint_fk': db_endpoint}) @@ -811,3 +818,56 @@ class ContextServiceServicerImpl(ContextServiceServicer): def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]: for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT): yield ConnectionEvent(**json.loads(message.content)) + + + # ----- Policy ----------------------------------------------------------------------------------------------------- + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListPolicyRuleIds(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleIdList: + with self.lock: + db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) + db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) + return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules]) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def ListPolicyRules(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleList: + with self.lock: + 
db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel) + db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk')) + return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules]) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def GetPolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule: + with self.lock: + policy_rule_uuid = request.uuid.uuid + db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid) + return PolicyRule(**db_policy_rule.dump()) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def SetPolicyRule(self, request: PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId: + with self.lock: + policy_rule_type = request.WhichOneof('policy_rule') + policy_rule_json = grpc_message_to_json(request) + policy_rule_uuid = policy_rule_json[policy_rule_type]['policyRuleBasic']['policyRuleId']['uuid']['uuid'] + result: Tuple[PolicyRuleModel, bool] = update_or_create_object( + self.database, PolicyRuleModel, policy_rule_uuid, {'value': json.dumps(policy_rule_json)}) + db_policy, updated = result # pylint: disable=unused-variable + + #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE + dict_policy_id = db_policy.dump_id() + #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id}) + return PolicyRuleId(**dict_policy_id) + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RemovePolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> Empty: + with self.lock: + policy_uuid = request.uuid.uuid + db_policy = PolicyRuleModel(self.database, policy_uuid, auto_load=False) + found = db_policy.load() + if not found: return Empty() + + dict_policy_id = db_policy.dump_id() + db_policy.delete() + #event_type = EventTypeEnum.EVENTTYPE_REMOVE + #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": 
dict_policy_id}) + return Empty() diff --git a/src/context/service/rest_server/Resources.py b/src/context/service/rest_server/Resources.py index d1738edb20361dab70334bc026d94d37c654127a..5f03132a34004388596ce1fdfac470f029c093ea 100644 --- a/src/context/service/rest_server/Resources.py +++ b/src/context/service/rest_server/Resources.py @@ -17,6 +17,7 @@ from flask.json import jsonify from flask_restful import Resource from common.orm.Database import Database from common.proto.context_pb2 import ConnectionId, ContextId, DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId +from common.proto.policy_pb2 import PolicyRuleId from common.tools.grpc.Tools import grpc_message_to_json from context.service.grpc_server.ContextServiceServicerImpl import ContextServiceServicerImpl @@ -61,6 +62,11 @@ def grpc_topology_id(context_uuid, topology_uuid): 'topology_uuid': {'uuid': topology_uuid} }) +def grpc_policy_rule_id(policy_rule_uuid): + return PolicyRuleId(**{ + 'uuid': {'uuid': policy_rule_uuid} + }) + class _Resource(Resource): def __init__(self, database : Database) -> None: super().__init__() @@ -151,6 +157,18 @@ class Connection(_Resource): def get(self, connection_uuid : str): return format_grpc_to_json(self.servicer.GetConnection(grpc_connection_id(connection_uuid), None)) +class PolicyRuleIds(_Resource): + def get(self): + return format_grpc_to_json(self.servicer.ListPolicyRuleIds(Empty(), None)) + +class PolicyRules(_Resource): + def get(self): + return format_grpc_to_json(self.servicer.ListPolicyRules(Empty(), None)) + +class PolicyRule(_Resource): + def get(self, policy_rule_uuid : str): + return format_grpc_to_json(self.servicer.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid), None)) + class DumpText(Resource): def __init__(self, database : Database) -> None: super().__init__() @@ -219,6 +237,10 @@ RESOURCES = [ ('api.connections', Connections, '/context/<string:context_uuid>/service/<path:service_uuid>/connections'), ('api.connection', Connection, 
'/connection/<path:connection_uuid>'), + ('api.policyrule_ids', PolicyRuleIds, '/policyrule_ids'), + ('api.policyrules', PolicyRules, '/policyrules'), + ('api.policyrule', PolicyRule, '/policyrule/<string:policyrule_uuid>'), + ('api.dump.text', DumpText, '/dump/text'), ('api.dump.html', DumpHtml, '/dump/html'), ] diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py index 140cbff686eaf5b430f23ee987a9335ecb04c0f5..1cf929cfa578e8bbf8f95885cc2a7bc7e7b9f3ef 100644 --- a/src/context/tests/Objects.py +++ b/src/context/tests/Objects.py @@ -23,6 +23,7 @@ from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id from common.tools.object_factory.Link import json_link, json_link_id from common.tools.object_factory.Service import json_service_id, json_service_l3nm_planned from common.tools.object_factory.Topology import json_topology, json_topology_id +from common.tools.object_factory.PolicyRule import json_policy_rule, json_policy_rule_id # ----- Context -------------------------------------------------------------------------------------------------------- @@ -197,3 +198,9 @@ CONNECTION_R1_R3_SVCIDS = [SERVICE_R1_R2_ID, SERVICE_R2_R3_ID] CONNECTION_R1_R3 = json_connection( CONNECTION_R1_R3_UUID, service_id=SERVICE_R1_R3_ID, path_hops_endpoint_ids=CONNECTION_R1_R3_EPIDS, sub_service_ids=CONNECTION_R1_R3_SVCIDS) + + +# ----- PolicyRule ------------------------------------------------------------------------------------------------------- +POLICY_RULE_UUID = '56380225-3e40-4f74-9162-529f8dcb96a1' +POLICY_RULE_ID = json_policy_rule_id(POLICY_RULE_UUID) +POLICY_RULE = json_policy_rule(POLICY_RULE_UUID) diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index 3109ef13dea98d4a56d661871b1c38ee2296f890..022c0472039d526e488f8a69096fae8c0edbdb48 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -27,6 +27,7 @@ from common.proto.context_pb2 import ( Connection, 
ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, DeviceOperationalStatusEnum, Empty, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) +from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule) from common.type_checkers.Assertions import ( validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids, validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids, @@ -44,7 +45,8 @@ from .Objects import ( CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R2_UUID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3, - SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID) + SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID, + POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) @@ -68,8 +70,8 @@ REDIS_CONFIG = { } SCENARIOS = [ - ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ), - ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), + ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ) +# ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), ] @pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) @@ -1169,6 +1171,101 @@ def test_grpc_connection( assert len(db_entries) == 0 +def test_grpc_policy( 
+ context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name + context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + context_database = context_db_mb[0] + + # ----- Clean the database ----------------------------------------------------------------------------------------- + context_database.clear_all() + + # ----- Initialize the EventsCollector ----------------------------------------------------------------------------- + #events_collector = EventsCollector(context_client_grpc) + #events_collector.start() + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + POLICY_ID = 'no-uuid' + DEFAULT_POLICY_ID = {'uuid': {'uuid': POLICY_ID}} + + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetPolicyRule(PolicyRuleId(**DEFAULT_POLICY_ID)) + + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'PolicyRule({:s}) not found'.format(POLICY_ID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 0 + + response = context_client_grpc.ListPolicyRules(Empty()) + assert len(response.policyRules) == 0 + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = 
context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) + assert response.uuid.uuid == POLICY_RULE_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=1) + # assert isinstance(events[0], PolicyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) + assert response.uuid.uuid == POLICY_RULE_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 2 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetPolicyRule(PolicyRuleId(**POLICY_RULE_ID)) + assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == POLICY_RULE_UUID + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 1 + assert response.policyRuleIdList[0].uuid.uuid == POLICY_RULE_UUID + + response = context_client_grpc.ListPolicyRules(Empty()) + assert len(response.policyRules) == 1 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + 
context_client_grpc.RemovePolicyRule(PolicyRuleId(**POLICY_RULE_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=2) + + # assert isinstance(events[0], PolicyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID + + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + # events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + + # ----- Test REST API methods ------------------------------------------------------------------------------------------ def test_rest_populate_database( @@ -1224,6 +1321,22 @@ def test_rest_get_service(context_service_rest : RestServer): # pylint: disable= reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid)) validate_service(reply) +def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid)) + #validate_slice_ids(reply) + +def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/slices'.format(context_uuid)) + #validate_slices(reply) + +#def 
test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name +# context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) +# slice_uuid = urllib.parse.quote(SLICE_R1_R2_UUID, safe='') +# reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid)) +# #validate_slice(reply) + def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name reply = do_rest_request('/device_ids') validate_device_ids(reply) @@ -1267,6 +1380,19 @@ def test_rest_get_connection(context_service_rest : RestServer): # pylint: disab reply = do_rest_request('/connection/{:s}'.format(connection_uuid)) validate_connection(reply) +def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/policyrule_ids') + #validate_policyrule_ids(reply) + +def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/policyrules') + #validate_policyrules(reply) + +#def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name +# policyrule_uuid = urllib.parse.quote(POLICYRULE_UUID, safe='') +# reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid)) +# #validate_policyrule(reply) + # ----- Test misc. 
Context internal tools ------------------------------------------------------------------------------ diff --git a/src/device/requirements.in b/src/device/requirements.in index 9c8c0ef18f3bcd4a92180465d11cd465c4336d44..2b9c199c86a580b72190a9d0e74a161e567abed2 100644 --- a/src/device/requirements.in +++ b/src/device/requirements.in @@ -9,6 +9,7 @@ python-json-logger==2.0.2 pytz==2021.3 redis==4.1.2 requests==2.27.1 +requests-mock==1.9.3 xmltodict==0.12.0 tabulate ipaddress diff --git a/src/device/service/database/DeviceModel.py b/src/device/service/database/DeviceModel.py index 7a0a2325928ed7312063eb66d629a08cc7591b7b..9dd63d36efebf135b7bb38845d917bc9e03dc100 100644 --- a/src/device/service/database/DeviceModel.py +++ b/src/device/service/database/DeviceModel.py @@ -35,6 +35,7 @@ class ORM_DeviceDriverEnum(Enum): P4 = DeviceDriverEnum.DEVICEDRIVER_P4 IETF_NETWORK_TOPOLOGY = DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY ONF_TR_352 = DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352 + XR = DeviceDriverEnum.DEVICEDRIVER_XR grpc_to_enum__device_driver = functools.partial( grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum) diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index 535b553a81c24ea12c4b9fefc1563541e45f77fa..4e4a9ac11363958fb4609976ce8609745bb97c01 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -29,7 +29,7 @@ DRIVERS.append( { FilterFieldEnum.DEVICE_TYPE: [ DeviceTypeEnum.EMULATED_DATACENTER, - DeviceTypeEnum.EMULATED_MICROVAWE_RADIO_SYSTEM, + DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM, DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM, DeviceTypeEnum.EMULATED_OPTICAL_ROADM, DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER, @@ -38,7 +38,7 @@ DRIVERS.append( DeviceTypeEnum.EMULATED_PACKET_SWITCH, #DeviceTypeEnum.DATACENTER, - #DeviceTypeEnum.MICROVAWE_RADIO_SYSTEM, + #DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM, #DeviceTypeEnum.OPEN_LINE_SYSTEM, #DeviceTypeEnum.OPTICAL_ROADM, 
#DeviceTypeEnum.OPTICAL_TRANSPONDER, @@ -54,7 +54,7 @@ DRIVERS.append( # # Emulated devices, all drivers => use Emulated # FilterFieldEnum.DEVICE_TYPE: [ # DeviceTypeEnum.EMULATED_DATACENTER, - # DeviceTypeEnum.EMULATED_MICROVAWE_RADIO_SYSTEM, + # DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM, # DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM, # DeviceTypeEnum.EMULATED_OPTICAL_ROADM, # DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER, @@ -111,7 +111,18 @@ if LOAD_ALL_DEVICE_DRIVERS: DRIVERS.append( (IETFApiDriver, [ { - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROVAWE_RADIO_SYSTEM, + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM, FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY, } ])) + +if LOAD_ALL_DEVICE_DRIVERS: + from .xr.XrDriver import XrDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (XrDriver, [ + { + # Close enough, it does optical switching + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.XR_CONSTELLATION, + FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.XR, + } + ])) diff --git a/src/device/service/drivers/microwave/Tools.py b/src/device/service/drivers/microwave/Tools.py index 93498f72d4646e1837e934903b6b3e2da6e52259..4f74def4dd6c370a9d2bf07b1fbe85670f5c2956 100644 --- a/src/device/service/drivers/microwave/Tools.py +++ b/src/device/service/drivers/microwave/Tools.py @@ -17,6 +17,12 @@ from device.service.driver_api._Driver import RESOURCE_ENDPOINTS LOGGER = logging.getLogger(__name__) +HTTP_OK_CODES = { + 200, # OK + 201, # Created + 202, # Accepted + 204, # No Content +} def find_key(resource, key): return json.loads(resource[1])[key] @@ -128,10 +134,10 @@ def create_connectivity_service( LOGGER.exception('Exception creating ConnectivityService(uuid={:s}, data={:s})'.format(str(uuid), str(data))) results.append(e) else: - if response.status_code != 201: + if response.status_code not in HTTP_OK_CODES: msg = 'Could not create ConnectivityService(uuid={:s}, data={:s}). 
status_code={:s} reply={:s}' LOGGER.error(msg.format(str(uuid), str(data), str(response.status_code), str(response))) - results.append(response.status_code == 201) + results.append(response.status_code in HTTP_OK_CODES) return results def delete_connectivity_service(root_url, timeout, uuid): @@ -144,8 +150,8 @@ def delete_connectivity_service(root_url, timeout, uuid): LOGGER.exception('Exception deleting ConnectivityService(uuid={:s})'.format(str(uuid))) results.append(e) else: - if response.status_code != 201: + if response.status_code not in HTTP_OK_CODES: msg = 'Could not delete ConnectivityService(uuid={:s}). status_code={:s} reply={:s}' LOGGER.error(msg.format(str(uuid), str(response.status_code), str(response))) - results.append(response.status_code == 202) + results.append(response.status_code in HTTP_OK_CODES) return results diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py index 069c07ce40e43192b74519b2175e7e10c638cd20..b8ff795fbd9466874b07f1f752fce682ea741111 100644 --- a/src/device/service/drivers/p4/p4_driver.py +++ b/src/device/service/drivers/p4/p4_driver.py @@ -28,7 +28,7 @@ from .p4_common import matches_ipv4, matches_ipv6, valid_port,\ P4_ATTR_DEV_P4BIN, P4_ATTR_DEV_P4INFO, P4_ATTR_DEV_TIMEOUT,\ P4_VAL_DEF_VENDOR, P4_VAL_DEF_HW_VER, P4_VAL_DEF_SW_VER,\ P4_VAL_DEF_TIMEOUT -from .p4_manager import P4Manager, get_api_version, KEY_TABLE,\ +from .p4_manager import P4Manager, KEY_TABLE,\ KEY_ACTION_PROFILE, KEY_COUNTER, KEY_DIR_COUNTER, KEY_METER, KEY_DIR_METER,\ KEY_CTL_PKT_METADATA from .p4_client import WriteOperation @@ -127,8 +127,7 @@ class P4Driver(_Driver): except Exception as ex: # pylint: disable=broad-except raise Exception(ex) from ex - LOGGER.info("\tConnected via P4Runtime version %s", - get_api_version()) + LOGGER.info("\tConnected via P4Runtime") self.__started.set() return True diff --git a/src/device/service/drivers/p4/p4_manager.py b/src/device/service/drivers/p4/p4_manager.py index 
65f8602ea30fa2d8cd06b09655ee4ee63d045a97..178487250ea3a5652690fb39f1631a0133aec4e3 100644 --- a/src/device/service/drivers/p4/p4_manager.py +++ b/src/device/service/drivers/p4/p4_manager.py @@ -55,7 +55,7 @@ LOGGER = logging.getLogger(__name__) CONTEXT = Context() # Global P4Runtime client -CLIENT = None +CLIENTS = {} # Constant P4 entities KEY_TABLE = "table" @@ -76,25 +76,6 @@ def get_context(): """ return CONTEXT - -def get_client(): - """ - Return P4 client. - - :return: P4Runtime client object - """ - return CLIENT - - -def get_api_version(): - """ - Get the supported P4Runtime API version. - - :return: API version - """ - return CLIENT.api_version() - - def get_table_type(table): """ Assess the type of P4 table based upon the matching scheme. @@ -136,171 +117,28 @@ def match_type_to_str(match_type): return None -def insert_table_entry_exact( - table_name, match_map, action_name, action_params, metadata, - cnt_pkt=-1, cnt_byte=-1): - """ - Insert an entry into an exact match table. 
- - :param table_name: P4 table name - :param match_map: Map of match operations - :param action_name: Action name - :param action_params: Map of action parameters - :param metadata: table metadata - :param cnt_pkt: packet count - :param cnt_byte: byte count - :return: inserted entry - """ - assert match_map, "Table entry without match operations is not accepted" - assert action_name, "Table entry without action is not accepted" - - table_entry = TableEntry(table_name)(action=action_name) - - for match_k, match_v in match_map.items(): - table_entry.match[match_k] = match_v - - for action_k, action_v in action_params.items(): - table_entry.action[action_k] = action_v - - if metadata: - table_entry.metadata = metadata - - if cnt_pkt > 0: - table_entry.counter_data.packet_count = cnt_pkt - - if cnt_byte > 0: - table_entry.counter_data.byte_count = cnt_byte - - ex_msg = "" - try: - table_entry.insert() - LOGGER.info("Inserted exact table entry: %s", table_entry) - except (P4RuntimeException, P4RuntimeWriteException) as ex: - raise P4RuntimeException from ex - - # Table entry exists, needs to be modified - if "ALREADY_EXISTS" in ex_msg: - table_entry.modify() - LOGGER.info("Updated exact table entry: %s", table_entry) - - return table_entry - - -def insert_table_entry_ternary( - table_name, match_map, action_name, action_params, metadata, - priority, cnt_pkt=-1, cnt_byte=-1): - """ - Insert an entry into a ternary match table. 
- - :param table_name: P4 table name - :param match_map: Map of match operations - :param action_name: Action name - :param action_params: Map of action parameters - :param metadata: table metadata - :param priority: entry priority - :param cnt_pkt: packet count - :param cnt_byte: byte count - :return: inserted entry - """ - assert match_map, "Table entry without match operations is not accepted" - assert action_name, "Table entry without action is not accepted" - - table_entry = TableEntry(table_name)(action=action_name) - - for match_k, match_v in match_map.items(): - table_entry.match[match_k] = match_v - - for action_k, action_v in action_params.items(): - table_entry.action[action_k] = action_v - - table_entry.priority = priority - - if metadata: - table_entry.metadata = metadata - - if cnt_pkt > 0: - table_entry.counter_data.packet_count = cnt_pkt - - if cnt_byte > 0: - table_entry.counter_data.byte_count = cnt_byte - - ex_msg = "" - try: - table_entry.insert() - LOGGER.info("Inserted ternary table entry: %s", table_entry) - except (P4RuntimeException, P4RuntimeWriteException) as ex: - raise P4RuntimeException from ex - - # Table entry exists, needs to be modified - if "ALREADY_EXISTS" in ex_msg: - table_entry.modify() - LOGGER.info("Updated ternary table entry: %s", table_entry) - - return table_entry - - -def insert_table_entry_range( - table_name, match_map, action_name, action_params, metadata, - priority, cnt_pkt=-1, cnt_byte=-1): # pylint: disable=unused-argument - """ - Insert an entry into a range match table. 
- - :param table_name: P4 table name - :param match_map: Map of match operations - :param action_name: Action name - :param action_params: Map of action parameters - :param metadata: table metadata - :param priority: entry priority - :param cnt_pkt: packet count - :param cnt_byte: byte count - :return: inserted entry - """ - assert match_map, "Table entry without match operations is not accepted" - assert action_name, "Table entry without action is not accepted" - - raise NotImplementedError( - "Range-based table insertion not implemented yet") - - -def insert_table_entry_optional( - table_name, match_map, action_name, action_params, metadata, - priority, cnt_pkt=-1, cnt_byte=-1): # pylint: disable=unused-argument - """ - Insert an entry into an optional match table. - - :param table_name: P4 table name - :param match_map: Map of match operations - :param action_name: Action name - :param action_params: Map of action parameters - :param metadata: table metadata - :param priority: entry priority - :param cnt_pkt: packet count - :param cnt_byte: byte count - :return: inserted entry - """ - assert match_map, "Table entry without match operations is not accepted" - assert action_name, "Table entry without action is not accepted" - - raise NotImplementedError( - "Optional-based table insertion not implemented yet") - class P4Manager: """ Class to manage the runtime entries of a P4 pipeline. 
""" + local_client = None + key_id = None def __init__(self, device_id: int, ip_address: str, port: int, election_id: tuple, role_name=None, ssl_options=None): - global CLIENT + global CLIENTS self.__id = device_id self.__ip_address = ip_address self.__port = int(port) self.__endpoint = f"{self.__ip_address}:{self.__port}" - CLIENT = P4RuntimeClient( + self.key_id = ip_address+str(port) + CLIENTS[self.key_id] = P4RuntimeClient( self.__id, self.__endpoint, election_id, role_name, ssl_options) self.__p4info = None + + self.local_client = CLIENTS[self.key_id] # Internal memory for whitebox management # | -> P4 entities @@ -339,27 +177,27 @@ class P4Manager: # Forwarding pipeline is only set iff both files are present if p4bin_path and p4info_path: try: - CLIENT.set_fwd_pipe_config(p4info_path, p4bin_path) + self.local_client.set_fwd_pipe_config(p4info_path, p4bin_path) except FileNotFoundError as ex: LOGGER.critical(ex) - CLIENT.tear_down() + self.local_client.tear_down() raise FileNotFoundError(ex) from ex except P4RuntimeException as ex: LOGGER.critical("Error when setting config") LOGGER.critical(ex) - CLIENT.tear_down() + self.local_client.tear_down() raise P4RuntimeException(ex) from ex except Exception as ex: # pylint: disable=broad-except LOGGER.critical("Error when setting config") - CLIENT.tear_down() + self.local_client.tear_down() raise Exception(ex) from ex try: - self.__p4info = CLIENT.get_p4info() + self.__p4info = self.local_client.get_p4info() except P4RuntimeException as ex: LOGGER.critical("Error when retrieving P4Info") LOGGER.critical(ex) - CLIENT.tear_down() + self.local_client.tear_down() raise P4RuntimeException(ex) from ex CONTEXT.set_p4info(self.__p4info) @@ -375,14 +213,15 @@ class P4Manager: :return: void """ - global CLIENT + global CLIENTS # gRPC client must already be instantiated - assert CLIENT + assert self.local_client # Trigger connection tear down with the P4Runtime server - CLIENT.tear_down() - CLIENT = None + 
self.local_client.tear_down() + # Remove client entry from global dictionary + CLIENTS.pop(self.key_id) self.__clear() LOGGER.info("P4Runtime manager stopped") @@ -723,7 +562,7 @@ class P4Manager: try: for count, table_entry in enumerate( - TableEntry(table_name)(action=action_name).read()): + TableEntry(self.local_client, table_name)(action=action_name).read()): LOGGER.debug( "Table %s - Entry %d\n%s", table_name, count, table_entry) self.table_entries[table_name].append(table_entry) @@ -856,6 +695,154 @@ class P4Manager: ) return None + def insert_table_entry_exact(self, + table_name, match_map, action_name, action_params, metadata, + cnt_pkt=-1, cnt_byte=-1): + """ + Insert an entry into an exact match table. + + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + table_entry = TableEntry(self.local_client, table_name)(action=action_name) + + for match_k, match_v in match_map.items(): + table_entry.match[match_k] = match_v + + for action_k, action_v in action_params.items(): + table_entry.action[action_k] = action_v + + if metadata: + table_entry.metadata = metadata + + if cnt_pkt > 0: + table_entry.counter_data.packet_count = cnt_pkt + + if cnt_byte > 0: + table_entry.counter_data.byte_count = cnt_byte + + ex_msg = "" + try: + table_entry.insert() + LOGGER.info("Inserted exact table entry: %s", table_entry) + except (P4RuntimeException, P4RuntimeWriteException) as ex: + raise P4RuntimeException from ex + + # Table entry exists, needs to be modified + if "ALREADY_EXISTS" in ex_msg: + table_entry.modify() + LOGGER.info("Updated exact table entry: %s", table_entry) + + return table_entry + 
+ + def insert_table_entry_ternary(self, + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt=-1, cnt_byte=-1): + """ + Insert an entry into a ternary match table. + + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param priority: entry priority + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + table_entry = TableEntry(self.local_client, table_name)(action=action_name) + + for match_k, match_v in match_map.items(): + table_entry.match[match_k] = match_v + + for action_k, action_v in action_params.items(): + table_entry.action[action_k] = action_v + + table_entry.priority = priority + + if metadata: + table_entry.metadata = metadata + + if cnt_pkt > 0: + table_entry.counter_data.packet_count = cnt_pkt + + if cnt_byte > 0: + table_entry.counter_data.byte_count = cnt_byte + + ex_msg = "" + try: + table_entry.insert() + LOGGER.info("Inserted ternary table entry: %s", table_entry) + except (P4RuntimeException, P4RuntimeWriteException) as ex: + raise P4RuntimeException from ex + + # Table entry exists, needs to be modified + if "ALREADY_EXISTS" in ex_msg: + table_entry.modify() + LOGGER.info("Updated ternary table entry: %s", table_entry) + + return table_entry + + + def insert_table_entry_range(self, + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt=-1, cnt_byte=-1): # pylint: disable=unused-argument + """ + Insert an entry into a range match table. 
+ + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param priority: entry priority + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + raise NotImplementedError( + "Range-based table insertion not implemented yet") + + + def insert_table_entry_optional(self, + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt=-1, cnt_byte=-1): # pylint: disable=unused-argument + """ + Insert an entry into an optional match table. + + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param priority: entry priority + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + raise NotImplementedError( + "Optional-based table insertion not implemented yet") + def insert_table_entry(self, table_name, match_map, action_name, action_params, priority, metadata=None, cnt_pkt=-1, cnt_byte=-1): @@ -889,26 +876,26 @@ class P4Manager: # Exact match is supported if get_table_type(table) == p4info_pb2.MatchField.EXACT: - return insert_table_entry_exact( + return self.insert_table_entry_exact( table_name, match_map, action_name, action_params, metadata, cnt_pkt, cnt_byte) # Ternary and LPM matches are supported if get_table_type(table) in \ [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]: - return insert_table_entry_ternary( + return self.insert_table_entry_ternary( table_name, match_map, action_name, 
action_params, metadata, priority, cnt_pkt, cnt_byte) # TODO: Cover RANGE match # pylint: disable=W0511 if get_table_type(table) == p4info_pb2.MatchField.RANGE: - return insert_table_entry_range( + return self.insert_table_entry_range( table_name, match_map, action_name, action_params, metadata, priority, cnt_pkt, cnt_byte) # TODO: Cover OPTIONAL match # pylint: disable=W0511 if get_table_type(table) == p4info_pb2.MatchField.OPTIONAL: - return insert_table_entry_optional( + return self.insert_table_entry_optional( table_name, match_map, action_name, action_params, metadata, priority, cnt_pkt, cnt_byte) @@ -935,7 +922,7 @@ class P4Manager: LOGGER.error(msg) raise UserError(msg) - table_entry = TableEntry(table_name)(action=action_name) + table_entry = TableEntry(self.local_client, table_name)(action=action_name) for match_k, match_v in match_map.items(): table_entry.match[match_k] = match_v @@ -979,7 +966,7 @@ class P4Manager: LOGGER.error(msg) raise UserError(msg) - TableEntry(table_name).read(function=lambda x: x.delete()) + TableEntry(self.local_client, table_name).read(function=lambda x: x.delete()) LOGGER.info("Deleted all entries from table: %s", table_name) def print_table_entries_spec(self, table_name): @@ -1179,7 +1166,7 @@ class P4Manager: self.counter_entries[cnt_name] = [] try: - for count, cnt_entry in enumerate(CounterEntry(cnt_name).read()): + for count, cnt_entry in enumerate(CounterEntry(self.local_client, cnt_name).read()): LOGGER.debug( "Counter %s - Entry %d\n%s", cnt_name, count, cnt_entry) self.counter_entries[cnt_name].append(cnt_entry) @@ -1298,7 +1285,7 @@ class P4Manager: assert cnt, \ "P4 pipeline does not implement counter " + cnt_name - cnt_entry = CounterEntry(cnt_name) + cnt_entry = CounterEntry(self.local_client, cnt_name) if index: cnt_entry.index = index @@ -1325,7 +1312,7 @@ class P4Manager: assert cnt, \ "P4 pipeline does not implement counter " + cnt_name - cnt_entry = CounterEntry(cnt_name) + cnt_entry = 
CounterEntry(self.local_client, cnt_name) cnt_entry.clear_data() LOGGER.info("Cleared data of counter entry: %s", cnt_entry) @@ -1394,7 +1381,7 @@ class P4Manager: try: for count, d_cnt_entry in enumerate( - DirectCounterEntry(d_cnt_name).read()): + DirectCounterEntry(self.local_client, d_cnt_name).read()): LOGGER.debug( "Direct counter %s - Entry %d\n%s", d_cnt_name, count, d_cnt_entry) @@ -1530,7 +1517,7 @@ class P4Manager: assert match_map,\ "Direct counter entry without match operations is not accepted" - d_cnt_entry = DirectCounterEntry(d_cnt_name) + d_cnt_entry = DirectCounterEntry(self.local_client, d_cnt_name) for match_k, match_v in match_map.items(): d_cnt_entry.table_entry.match[match_k] = match_v @@ -1559,7 +1546,7 @@ class P4Manager: assert d_cnt, \ "P4 pipeline does not implement direct counter " + d_cnt_name - d_cnt_entry = DirectCounterEntry(d_cnt_name) + d_cnt_entry = DirectCounterEntry(self.local_client, d_cnt_name) d_cnt_entry.clear_data() LOGGER.info("Cleared direct counter entry: %s", d_cnt_entry) @@ -1627,7 +1614,7 @@ class P4Manager: self.meter_entries[meter_name] = [] try: - for count, meter_entry in enumerate(MeterEntry(meter_name).read()): + for count, meter_entry in enumerate(MeterEntry(self.local_client, meter_name).read()): LOGGER.debug( "Meter %s - Entry %d\n%s", meter_name, count, meter_entry) self.meter_entries[meter_name].append(meter_entry) @@ -1756,7 +1743,7 @@ class P4Manager: assert meter, \ "P4 pipeline does not implement meter " + meter_name - meter_entry = MeterEntry(meter_name) + meter_entry = MeterEntry(self.local_client, meter_name) if index: meter_entry.index = index @@ -1789,7 +1776,7 @@ class P4Manager: assert meter, \ "P4 pipeline does not implement meter " + meter_name - meter_entry = MeterEntry(meter_name) + meter_entry = MeterEntry(self.local_client, meter_name) meter_entry.clear_config() LOGGER.info("Cleared meter entry: %s", meter_entry) @@ -1858,7 +1845,7 @@ class P4Manager: try: for count, d_meter_entry in 
enumerate( - MeterEntry(d_meter_name).read()): + MeterEntry(self.local_client, d_meter_name).read()): LOGGER.debug( "Direct meter %s - Entry %d\n%s", d_meter_name, count, d_meter_entry) @@ -1998,7 +1985,7 @@ class P4Manager: assert match_map,\ "Direct meter entry without match operations is not accepted" - d_meter_entry = DirectMeterEntry(d_meter_name) + d_meter_entry = DirectMeterEntry(self.local_client, d_meter_name) for match_k, match_v in match_map.items(): d_meter_entry.table_entry.match[match_k] = match_v @@ -2031,7 +2018,7 @@ class P4Manager: assert d_meter, \ "P4 pipeline does not implement direct meter " + d_meter_name - d_meter_entry = DirectMeterEntry(d_meter_name) + d_meter_entry = DirectMeterEntry(self.local_client, d_meter_name) d_meter_entry.clear_config() LOGGER.info("Cleared direct meter entry: %s", d_meter_entry) @@ -2100,7 +2087,7 @@ class P4Manager: try: for count, ap_entry in enumerate( - ActionProfileMember(ap_name).read()): + ActionProfileMember(self.local_client, ap_name).read()): LOGGER.debug( "Action profile member %s - Entry %d\n%s", ap_name, count, ap_entry) @@ -2230,7 +2217,7 @@ class P4Manager: assert act_p, \ "P4 pipeline does not implement action profile " + ap_name - ap_member_entry = ActionProfileMember(ap_name)( + ap_member_entry = ActionProfileMember(self.local_client, ap_name)( member_id=member_id, action=action_name) for action_k, action_v in action_params.items(): @@ -2267,7 +2254,7 @@ class P4Manager: assert act_p, \ "P4 pipeline does not implement action profile " + ap_name - ap_member_entry = ActionProfileMember(ap_name)( + ap_member_entry = ActionProfileMember(self.local_client, ap_name)( member_id=member_id, action=action_name) ap_member_entry.delete() LOGGER.info("Deleted action profile member entry: %s", ap_member_entry) @@ -2364,7 +2351,7 @@ class P4Manager: try: for count, ap_entry in enumerate( - ActionProfileGroup(ap_name).read()): + ActionProfileGroup(self.local_client, ap_name).read()): LOGGER.debug("Action 
profile group %s - Entry %d\n%s", ap_name, count, ap_entry) self.action_profile_groups[ap_name].append(ap_entry) @@ -2483,7 +2470,7 @@ class P4Manager: assert ap, \ "P4 pipeline does not implement action profile " + ap_name - ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id) + ap_group_entry = ActionProfileGroup(self.local_client, ap_name)(group_id=group_id) if members: for m in members: @@ -2519,7 +2506,7 @@ class P4Manager: assert ap, \ "P4 pipeline does not implement action profile " + ap_name - ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id) + ap_group_entry = ActionProfileGroup(self.local_client, ap_name)(group_id=group_id) ap_group_entry.delete() LOGGER.info("Deleted action profile group entry: %s", ap_group_entry) @@ -2537,7 +2524,7 @@ class P4Manager: assert ap, \ "P4 pipeline does not implement action profile " + ap_name - ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id) + ap_group_entry = ActionProfileGroup(self.local_client, ap_name)(group_id=group_id) ap_group_entry.clear() LOGGER.info("Cleared action profile group entry: %s", ap_group_entry) @@ -2631,7 +2618,7 @@ class P4Manager: self.multicast_groups[group_id] = None try: - mcast_group = MulticastGroupEntry(group_id).read() + mcast_group = MulticastGroupEntry(self.local_client, group_id).read() LOGGER.debug("Multicast group %d\n%s", group_id, mcast_group) self.multicast_groups[group_id] = mcast_group return self.multicast_groups[group_id] @@ -2724,7 +2711,7 @@ class P4Manager: assert ports, \ "No multicast group ports are provided" - mcast_group = MulticastGroupEntry(group_id) + mcast_group = MulticastGroupEntry(self.local_client, group_id) for p in ports: mcast_group.add(p, 1) @@ -2756,7 +2743,7 @@ class P4Manager: assert group_id > 0, \ "Multicast group " + group_id + " must be > 0" - mcast_group = MulticastGroupEntry(group_id) + mcast_group = MulticastGroupEntry(self.local_client, group_id) mcast_group.delete() if group_id in self.multicast_groups: @@ 
-2772,7 +2759,7 @@ class P4Manager: :return: void """ - for mcast_group in MulticastGroupEntry().read(): + for mcast_group in MulticastGroupEntry(self.local_client).read(): gid = mcast_group.group_id mcast_group.delete() del self.multicast_groups[gid] @@ -2828,7 +2815,7 @@ class P4Manager: self.clone_session_entries[session_id] = None try: - session = CloneSessionEntry(session_id).read() + session = CloneSessionEntry(self.local_client, session_id).read() LOGGER.debug("Clone session %d\n%s", session_id, session) self.clone_session_entries[session_id] = session return self.clone_session_entries[session_id] @@ -2923,7 +2910,7 @@ class P4Manager: assert ports, \ "No clone session ports are provided" - session = CloneSessionEntry(session_id) + session = CloneSessionEntry(self.local_client, session_id) for p in ports: session.add(p, 1) @@ -2955,7 +2942,7 @@ class P4Manager: assert session_id > 0, \ "Clone session " + session_id + " must be > 0" - session = CloneSessionEntry(session_id) + session = CloneSessionEntry(self.local_client, session_id) session.delete() if session_id in self.clone_session_entries: @@ -2971,7 +2958,7 @@ class P4Manager: :return: void """ - for e in CloneSessionEntry().read(): + for e in CloneSessionEntry(self.local_client).read(): sid = e.session_id e.delete() del self.clone_session_entries[sid] @@ -3052,7 +3039,7 @@ class P4Manager: "No controller packet metadata in the pipeline\n") return None - packet_in = PacketOut() + packet_in = PacketIn(self.local_client) packet_in.payload = payload if metadata: for name, value in metadata.items(): @@ -3090,7 +3077,7 @@ class P4Manager: _t = Thread(target=_sniff_packet, args=(captured_packet,)) _t.start() # P4Runtime client sends the packet to the switch - CLIENT.stream_in_q["packet"].put(packet_in) + self.local_client.stream_in_q["packet"].put(packet_in) _t.join() LOGGER.info("Packet-in sent: %s", packet_in) @@ -3111,7 +3098,7 @@ class P4Manager: "No controller packet metadata in the pipeline\n") return 
None - packet_out = PacketOut() + packet_out = PacketOut(self.local_client) packet_out.payload = payload if metadata: for name, value in metadata.items(): @@ -3654,12 +3641,14 @@ class _EntityBase: """ Basic entity. """ + local_client = None - def __init__(self, entity_type, p4runtime_cls, modify_only=False): + def __init__(self, p4_client, entity_type, p4runtime_cls, modify_only=False): self._init = False self._entity_type = entity_type self._entry = p4runtime_cls() self._modify_only = modify_only + self.local_client = p4_client def __dir__(self): d = ["msg", "read"] @@ -3696,7 +3685,7 @@ class _EntityBase: update = p4runtime_pb2.Update() update.type = type_ getattr(update.entity, self._entity_type.name).CopyFrom(self._entry) - CLIENT.write_update(update) + self.local_client.write_update(update) def insert(self): """ @@ -3747,7 +3736,7 @@ class _EntityBase: entity = p4runtime_pb2.Entity() getattr(entity, self._entity_type.name).CopyFrom(self._entry) - iterator = CLIENT.read_one(entity) + iterator = self.local_client.read_one(entity) # Cannot use a (simpler) generator here as we need to # decorate __next__ with @parse_p4runtime_error. @@ -3794,9 +3783,9 @@ class _P4EntityBase(_EntityBase): Basic P4 entity. """ - def __init__(self, p4_type, entity_type, p4runtime_cls, name=None, + def __init__(self, p4_client, p4_type, entity_type, p4runtime_cls, name=None, modify_only=False): - super().__init__(entity_type, p4runtime_cls, modify_only) + super().__init__(p4_client, entity_type, p4runtime_cls, modify_only) self._p4_type = p4_type if name is None: raise UserError( @@ -3825,8 +3814,8 @@ class ActionProfileMember(_P4EntityBase): P4 action profile member. 
""" - def __init__(self, action_profile_name=None): - super().__init__( + def __init__(self, p4_client, action_profile_name=None): + super().__init__( p4_client, P4Type.action_profile, P4RuntimeEntity.action_profile_member, p4runtime_pb2.ActionProfileMember, action_profile_name) self.member_id = 0 @@ -3991,8 +3980,8 @@ class ActionProfileGroup(_P4EntityBase): P4 action profile group. """ - def __init__(self, action_profile_name=None): - super().__init__( + def __init__(self, p4_client, action_profile_name=None): + super().__init__( p4_client, P4Type.action_profile, P4RuntimeEntity.action_profile_group, p4runtime_pb2.ActionProfileGroup, action_profile_name) self.group_id = 0 @@ -4554,8 +4543,8 @@ class TableEntry(_P4EntityBase): "oneshot": cls._ActionSpecType.ONESHOT, }.get(name, None) - def __init__(self, table_name=None): - super().__init__( + def __init__(self, p4_client, table_name=None): + super().__init__(p4_client, P4Type.table, P4RuntimeEntity.table_entry, p4runtime_pb2.TableEntry, table_name) self.match = MatchKey(table_name, self._info.match_fields) @@ -4996,8 +4985,8 @@ class _CounterEntryBase(_P4EntityBase): Basic P4 counter entry. """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) + def __init__(self, p4_client, *args, **kwargs): + super().__init__(p4_client, *args, **kwargs) self._counter_type = self._info.spec.unit self.packet_count = -1 self.byte_count = -1 @@ -5065,8 +5054,8 @@ class CounterEntry(_CounterEntryBase): P4 counter entry. """ - def __init__(self, counter_name=None): - super().__init__( + def __init__(self, p4_client, counter_name=None): + super().__init__( p4_client, P4Type.counter, P4RuntimeEntity.counter_entry, p4runtime_pb2.CounterEntry, counter_name, modify_only=True) @@ -5126,10 +5115,11 @@ To write to the counter, use <self>.modify class DirectCounterEntry(_CounterEntryBase): """ Direct P4 counter entry. 
- """ + """ + local_client = None - def __init__(self, direct_counter_name=None): - super().__init__( + def __init__(self, p4_client, direct_counter_name=None): + super().__init__( p4_client, P4Type.direct_counter, P4RuntimeEntity.direct_counter_entry, p4runtime_pb2.DirectCounterEntry, direct_counter_name, modify_only=True) @@ -5140,7 +5130,8 @@ class DirectCounterEntry(_CounterEntryBase): except KeyError as ex: raise InvalidP4InfoError(f"direct_table_id {self._direct_table_id} " f"is not a valid table id") from ex - self._table_entry = TableEntry(self._direct_table_name) + self._table_entry = TableEntry(p4_client, self._direct_table_name) + self.local_client = p4_client self.__doc__ = f""" An entry for direct counter '{direct_counter_name}' @@ -5167,7 +5158,7 @@ To write to the counter, use <self>.modify raise UserError("Direct counters are not index-based") if name == "table_entry": if value is None: - self._table_entry = TableEntry(self._direct_table_name) + self._table_entry = TableEntry(self.local_client, self._direct_table_name) return if not isinstance(value, TableEntry): raise UserError("table_entry must be an instance of TableEntry") @@ -5221,7 +5212,7 @@ class _MeterEntryBase(_P4EntityBase): Basic P4 meter entry. """ - def __init__(self, *args, **kwargs): + def __init__(self, p4_client, *args, **kwargs): super().__init__(*args, **kwargs) self._meter_type = self._info.spec.unit self.index = -1 @@ -5291,8 +5282,8 @@ class MeterEntry(_MeterEntryBase): P4 meter entry. """ - def __init__(self, meter_name=None): - super().__init__( + def __init__(self, p4_client, meter_name=None): + super().__init__(p4_client, P4Type.meter, P4RuntimeEntity.meter_entry, p4runtime_pb2.MeterEntry, meter_name, modify_only=True) @@ -5356,9 +5347,10 @@ class DirectMeterEntry(_MeterEntryBase): """ Direct P4 meter entry. 
""" + local_client = None - def __init__(self, direct_meter_name=None): - super().__init__( + def __init__(self, p4_client, direct_meter_name=None): + super().__init__(p4_client, P4Type.direct_meter, P4RuntimeEntity.direct_meter_entry, p4runtime_pb2.DirectMeterEntry, direct_meter_name, modify_only=True) @@ -5369,7 +5361,8 @@ class DirectMeterEntry(_MeterEntryBase): except KeyError as ex: raise InvalidP4InfoError(f"direct_table_id {self._direct_table_id} " f"is not a valid table id") from ex - self._table_entry = TableEntry(self._direct_table_name) + self._table_entry = TableEntry(p4_client, self._direct_table_name) + self.local_client = p4_client self.__doc__ = f""" An entry for direct meter '{direct_meter_name}' @@ -5399,7 +5392,7 @@ To write to the meter, use <self>.modify raise UserError("Direct meters are not index-based") if name == "table_entry": if value is None: - self._table_entry = TableEntry(self._direct_table_name) + self._table_entry = TableEntry(self.local_client, self._direct_table_name) return if not isinstance(value, TableEntry): raise UserError("table_entry must be an instance of TableEntry") @@ -5531,8 +5524,8 @@ class MulticastGroupEntry(_EntityBase): P4 multicast group entry. """ - def __init__(self, group_id=0): - super().__init__( + def __init__(self, p4_client, group_id=0): + super().__init__(p4_client, P4RuntimeEntity.packet_replication_engine_entry, p4runtime_pb2.PacketReplicationEngineEntry) self.group_id = group_id @@ -5609,8 +5602,8 @@ class CloneSessionEntry(_EntityBase): P4 clone session entry. """ - def __init__(self, session_id=0): - super().__init__( + def __init__(self, p4_client, session_id=0): + super().__init__(p4_client, P4RuntimeEntity.packet_replication_engine_entry, p4runtime_pb2.PacketReplicationEngineEntry) self.session_id = session_id @@ -5779,8 +5772,9 @@ class PacketIn(): """ P4 packet in. 
""" + local_client = None - def __init__(self): + def __init__(self, p4_client): ctrl_pkt_md = P4Objects(P4Type.controller_packet_metadata) self.md_info_list = {} if "packet_in" in ctrl_pkt_md: @@ -5788,10 +5782,11 @@ class PacketIn(): for md_info in self.p4_info.metadata: self.md_info_list[md_info.name] = md_info self.packet_in_queue = queue.Queue() + self.local_client = p4_client def _packet_in_recv_func(packet_in_queue): while True: - msg = CLIENT.get_stream_packet("packet", timeout=None) + msg = self.local_client.get_stream_packet("packet", timeout=None) if not msg: break packet_in_queue.put(msg) @@ -5857,8 +5852,9 @@ class PacketOut: """ P4 packet out. """ + local_client = None - def __init__(self, payload=b'', **kwargs): + def __init__(self, p4_client, payload=b'', **kwargs): self.p4_info = P4Objects(P4Type.controller_packet_metadata)[ "packet_out"] @@ -5868,6 +5864,7 @@ class PacketOut: if kwargs: for key, value in kwargs.items(): self.metadata[key] = value + self.local_client = p4_client def _update_msg(self): self._entry = p4runtime_pb2.PacketOut() @@ -5897,7 +5894,7 @@ class PacketOut: self._update_msg() msg = p4runtime_pb2.StreamMessageRequest() msg.packet.CopyFrom(self._entry) - CLIENT.stream_out_q.put(msg) + self.local_client.stream_out_q.put(msg) def str(self): """ @@ -5913,13 +5910,16 @@ class IdleTimeoutNotification(): """ P4 idle timeout notification. 
""" + + local_client = None - def __init__(self): + def __init__(self, p4_client): self.notification_queue = queue.Queue() + self.local_client = p4_client.local_client def _notification_recv_func(notification_queue): while True: - msg = CLIENT.get_stream_packet("idle_timeout_notification", + msg = self.local_client.get_stream_packet("idle_timeout_notification", timeout=None) if not msg: break diff --git a/src/device/service/drivers/xr/README_XR.md b/src/device/service/drivers/xr/README_XR.md new file mode 100644 index 0000000000000000000000000000000000000000..f7c2316ce5fa810969d373e1fad7bc5ca83b9e49 --- /dev/null +++ b/src/device/service/drivers/xr/README_XR.md @@ -0,0 +1,149 @@ +# Infinera Readme + +There are some instructions at https://labs.etsi.org/rep/tfs/controller/-/tree/develop/tutorial . They are not completely up to date and don't 100% work. + +Note that many of the scripts expect this and that K8s namespace being used, they are not consistent, so use manual kubectl commands where necessary. + +Infinera repo (cloned from upstream) is https://bitbucket.infinera.com/projects/XRCA/repos/teraflow/browse . The main development branch for us is xr-development (branched of origin/develop). + +## Preliminaries + +Kubernetes must be installed and configured. + +Note that if runninc MicroK8s (I would highly recommend it), then install also regular kubectl so that scripts work. That is, download the kubectl, and also export credidentials to standard location. + +```bash +# As a root +su - +cd /usr/local/bin +curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" +chmod 755 kubectl +exit + +# As your local user +cd ~/.kube +microk8s config > config +``` + +Local Docker registry is needed for build results. 
Use the following command to start local registry (docker will pull necessary images from Internet) + +```bash +docker run -d -p 32000:5000 --restart=always --name registry registry:2 +``` + +Setup mydeploy script outside the git repo. E.g. following will do. SOURCE IT ON ALL SHELLS. + +IMPORTANT: September 2022 version of controller has a bug where any update to device trigger update to device +until GRPC endpoints are so loaded that K8s kills device service. XR does not need automation service, so it can +be left out. + +```bash +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" +# Without automation service (see note above) +export TFS_COMPONENTS="context device pathcomp service slice compute monitoring webui" +# Correct setting +# export TFS_COMPONENTS="context device automation pathcomp service slice compute monitoring webui" +# Pre-rebase +#export TFS_COMPONENTS="context device automation service compute monitoring webui" +export TFS_IMAGE_TAG="dev" +export TFS_K8S_NAMESPACE="tfs" +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" +export TFS_GRAFANA_PASSWORD="admin123+" +``` + +Build is containerized, pytest used for setup is not. Teraflow has some third party venv suggestion in docs. However standard venv works. Create: + +```bash +python -m venv .venv +source .venv/bin/activate +./install_requirements.sh +``` + +SOURCE VENV ACTIVATE ON ANY SHELL USED FOR PYTHON RELATED WORK (e.g. pytest). + +Use apt-get to install any missing tools (e.g. jq is required). + +For host based Python development (e.g. VS Code) and test script execution, generate protobuf stubs: + +```bash +cd proto +./generate_code_python.sh +cd ../src/context +ln -s ../../proto/src/python proto +``` + +For VS Code python extension imports it is convenient to set file .env to top level with content: + +``` +PYTHONPATH=src +``` +This will make imports to work properly in all cases. 
+ +## Building + +Run deploy script to build in docker containers and then instantiate to configured K8s cluster. Deploy script must be sources for this to work! + +```bash +./deploy.sh +``` + +If protobuf definitions have changed, regenerate version controlled Java files manually +(it is a horrifying bug in build system that this is not automated!). +``` +cd automation +# In case Java is not already installed +sudo apt-get install openjdk-11-jdk -y +export MAVEN_OPTS='--add-exports=java.base/jdk.internal.module=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED' +cd src/policy +./mvnw compile +cd - +cd src/automation +./mvnw compile +``` + +Compilation fails but does update the protobuf generated files. + +## Testing + +Upload descriptors_emulatex_xr.json via WEB UI to setup fake topology. + +Setup service by following commands in src directory. Kubernetes endpoins change on every build, so setup script is mandatory. + +```bash + source tests/ofc22/setup_test_env.sh + python -m pytest --verbose tests/ofc22/tests/test_functional_create_service_xr.py +``` + +Good logs to check are: + +* kubectl logs service/deviceservice --namespace tfs +* kubectl logs service/webuiservice --namespace tfs + +## Unit Tests +Run in src directory (src under repo top level) with command: + +```bash +PYTHONPATH=. pytest device/service/drivers/xr/cm +``` + +The PYTHONPATH is vital for imports to work properly. + +## cm-cli + +The tool cm-cli in the xr driver directory can be use to connect to CM and test the connectivity. 
For example: + +```bash +./cm-cli.py 172.19.219.44 443 xr-user-1 xr-user-1 --show-constellation-by-hub-name="XR HUB 1" +./cm-cli.py 172.19.219.44 443 xr-user-1 xr-user-1 --list-constellations +./cm-cli.py 172.19.219.44 443 xr-user-1 xr-user-1 --create-connection="FOO;XR HUB 1|XR-T4;XR LEAF 1|XR-T1" +./cm-cli.py 172.19.219.44 443 xr-user-1 xr-user-1 --show-connection-by-name="FooBar123" +./cm-cli.py 172.19.219.44 443 xr-user-1 xr-user-1 --list-connections +# Modify argumens: href;uuid;ifname;ifname +# uuid translates to name TF:uuid +./cm-cli.py 172.19.219.44 443 xr-user-1 xr-user-1 --modify-connection="/network-connections/0637da3b-3b20-4b44-a513-035e6ef897a3;MyCon1;XR HUB 1|XR-T1;XR LEAF 1|XR-T2;25" +./cm-cli.py 172.19.219.44 443 xr-user-1 xr-user-1 --delete-connection=/network-connections/138f0cc0-3dc6-4195-97c0-2cbed5fd59ba + ./cm-cli.py 172.19.219.44 443 xr-user-1 xr-user-1 --create-transport-capacity="FOO;XR HUB 1|XR-T4;XR LEAF 1|XR-T1;12" + ./cm-cli.py 172.19.219.44 443 xr-user-1 xr-user-1 --list-transport-capacities +# Exercise almost full path of SetConfig. Can also be used for changing bandwidth (e.g. in demos) of an service +./cm-cli.py 172.19.219.44 443 xr-user-1 xr-user-1 --emulate-tf-set-config-service="XR HUB 1;teraflow_service_uuid;XR HUB 1|XR-T4;XR LEAF 1|XR-T1;125" +``` diff --git a/src/device/service/drivers/xr/XrDriver.py b/src/device/service/drivers/xr/XrDriver.py new file mode 100644 index 0000000000000000000000000000000000000000..51fd29ad11af5ccdad7e5c49e7d069a1bf2e8ffb --- /dev/null +++ b/src/device/service/drivers/xr/XrDriver.py @@ -0,0 +1,171 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring + +import logging +import threading +import json +from typing import Any, Iterator, List, Optional, Tuple, Union +import urllib3 +from common.type_checkers.Checkers import chk_type +from device.service.driver_api._Driver import _Driver +from .cm.cm_connection import CmConnection +from .cm import tf + +# Don't complain about non-verified SSL certificate. This driver is demo only +# and CM is not provisioned in demos with a proper certificate. 
+urllib3.disable_warnings() + +LOGGER = logging.getLogger(__name__) + +class XrDriver(_Driver): + def __init__(self, address: str, port: int, **settings) -> None: # pylint: disable=super-init-not-called + self.__lock = threading.Lock() + self.__started = threading.Event() + self.__terminate = threading.Event() + self.__timeout = int(settings.get('timeout', 120)) + self.__cm_address = address + # Mandatory key, an exception will get thrown if missing + self.__hub_module_name = settings["hub_module_name"] + + tls_verify = False # Currently using self signed certificates + username = settings["username"] if "username" in settings else "xr-user-1" + password = settings["password"] if "password" in settings else "xr-user-1" + + self.__cm_connection = CmConnection(address, int(port), username, password, self.__timeout, tls_verify = tls_verify) + self.__constellation = None + + LOGGER.info(f"XrDriver instantiated, cm {address}:{port}, {settings=}") + + def __str__(self): + return f"{self.__hub_module_name}@{self.__cm_address}" + + def Connect(self) -> bool: + LOGGER.info(f"Connect[{self}]") + with self.__lock: + if self.__started.is_set(): + return True + if not self.__cm_connection.Connect(): + return False + else: + self.__started.set() + return True + + def Disconnect(self) -> bool: + LOGGER.info(f"Disconnect[{self}]") + with self.__lock: + self.__terminate.set() + return True + + def GetInitialConfig(self) -> List[Tuple[str, Any]]: + LOGGER.info(f"GetInitialConfig[{self}]") + with self.__lock: + return [] + + #pylint: disable=dangerous-default-value + def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: + LOGGER.info(f"GetConfig[{self}]: {resource_keys=}") + chk_type('resources', resource_keys, list) + + # Empty resource_keys means all resources. As we only have endpoints, we ignore parameter and always + # return everything. 
+ + with self.__lock: + constellation = self.__cm_connection.get_constellation_by_hub_name(self.__hub_module_name) + if constellation: + self.__constellation = constellation + return [(f"/endpoints/endpoint[{ifname}]", {'uuid': ifname, 'type': 'optical', 'sample_types': {}}) for ifname in constellation.ifnames()] + else: + return [] + + def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + LOGGER.info(f"SetConfig[{self}]: {resources=}") + # Logged config seems like: + #[('/service[52ff5f0f-fda4-40bd-a0b1-066f4ff04079:optical]', '{"capacity_unit": "GHz", "capacity_value": 1, "direction": "UNIDIRECTIONAL", "input_sip": "XR HUB 1|XR-T4", "layer_protocol_name": "PHOTONIC_MEDIA", "layer_protocol_qualifier": "tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC", "output_sip": "XR LEAF 1|XR-T1", "uuid": "52ff5f0f-fda4-40bd-a0b1-066f4ff04079:optical"}')] + + with self.__lock: + if self.__constellation is None: + self.__constellation = self.__cm_connection.get_constellation_by_hub_name(self.__hub_module_name) + + if self.__constellation is None: + LOGGER.error("SetConfig: no valid constellation") + return [False] * len(resources) + + results = [] + if len(resources) == 0: + return results + + for key, config in resources: + service_uuid = self.__cm_connection.service_uuid(key) + if service_uuid: + config = json.loads(config) + results.append(tf.set_config_for_service(self.__cm_connection, self.__constellation, service_uuid, config)) + else: + results.append(False) + + return results + + def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + LOGGER.info(f"DeleteConfig[{self}]: {resources=}") + + # Input looks like: + # resources=[('/service[c8a35e81-88d8-4468-9afc-a8abd92a64d0:optical]', '{"uuid": "c8a35e81-88d8-4468-9afc-a8abd92a64d0:optical"}')] + + with self.__lock: + results = [] + if len(resources) == 0: + return results + + # Temporary dummy version + for key, _config in resources: + service_uuid = 
self.__cm_connection.service_uuid(key) + if service_uuid: + connection = self.__cm_connection.get_connection_by_teraflow_uuid(service_uuid) + if connection is None: + LOGGER.info(f"DeleteConfig: Connection {service_uuid} does not exist, delete is no-op") + results.append(True) + else: + was_deleted = self.__cm_connection.delete_connection(connection.href) + if was_deleted: + LOGGER.info(f"DeleteConfig: Connection {service_uuid} deleted (was {str(connection)})") + else: + LOGGER.info(f"DeleteConfig: Connection {service_uuid} delete failure (was {str(connection)})") + + if self.__constellation.is_vti_mode(): + active_tc = self.__cm_connection.get_transport_capacity_by_teraflow_uuid(service_uuid) + if active_tc is not None: + if self.__cm_connection.delete_transport_capacity(active_tc.href): + LOGGER.info(f"DeleteConfig: Transport Capacity {active_tc} deleted") + else: + LOGGER.error(f"DeleteConfig: Transport Capacity {active_tc} delete failure") + + results.append(was_deleted) + else: + results.append(False) + + return results + + def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: + # Not supported + return [False for _ in subscriptions] + + def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]: + # Not supported + return [False for _ in subscriptions] + + def GetState( + self, blocking=False, terminate : Optional[threading.Event] = None + ) -> Iterator[Tuple[float, str, Any]]: + # Not supported + return [] diff --git a/src/device/service/drivers/xr/__init__.py b/src/device/service/drivers/xr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a --- /dev/null +++ b/src/device/service/drivers/xr/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/device/service/drivers/xr/cm-cli.py b/src/device/service/drivers/xr/cm-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..8b8fec59c45f458d802a9ff609c345f55948626e --- /dev/null +++ b/src/device/service/drivers/xr/cm-cli.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python3 +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Test program for CmConnection +import argparse +import logging +import traceback +from typing import Tuple +from cm.cm_connection import CmConnection +from cm.tf_service import TFService +from cm.transport_capacity import TransportCapacity +from cm.connection import Connection +import cm.tf as tf + +logging.basicConfig(level=logging.INFO) + +parser = argparse.ArgumentParser(description='CM Connectin Test Utility') +parser.add_argument('ip', help='CM IP address or domain name') +parser.add_argument('port', help='CM port', type=int) +parser.add_argument('username', help='Username') +parser.add_argument('password', help='Password') + +parser.add_argument('--list-constellations', action='store_true') +parser.add_argument('--show-constellation-by-hub-name', nargs='?', type=str) +parser.add_argument('--create-connection', nargs='?', type=str, help="uuid;ifname;ifname;capacity") +parser.add_argument('--modify-connection', nargs='?', type=str, help="href;uuid;ifname;ifname;capacity") +parser.add_argument('--show-connection-by-name', nargs='?', type=str) +parser.add_argument('--list-connections', action='store_true') +parser.add_argument('--delete-connection', nargs='?', type=str, help="connection id, e.g. \"/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03\"") +parser.add_argument('--list-transport-capacities', action='store_true') +parser.add_argument('--create-transport-capacity', nargs='?', type=str, help="uuid;ifname;ifname;capacity") +parser.add_argument('--emulate-tf-set-config-service', nargs='?', type=str, help="hubmodule;uuid;ifname;ifname;capacity or hubmodule;uuid;ifname;ifname;capacity;FORCE-VTI-ON") + +args = parser.parse_args() + +def cli_create_string_to_tf_service(cli_create_str: str) -> TFService: + sargs = cli_create_str.split(";") + if len(sargs) == 3: + return TFService(*sargs, 0) + if len(sargs) == 4: + sargs[-1] = int(sargs[-1]) + return TFService(*sargs) + print("Invalid object create arguments. 
Expecting \"oid;ifname1;ifname2;bandwidthgbits\" or \"oid;ifname1;ifname2\", where ifname is form \"MODULE|PORT\"") + exit(-1) + +def cli_modify_string_to_tf_service(cli_create_str: str) -> Tuple[str, TFService]: + sargs = cli_create_str.split(";") + if len(sargs) == 4: + return (sargs[0], TFService(*sargs[1:], 0)) + if len(sargs) == 5: + sargs[-1] = int(sargs[-1]) + return (sargs[0], TFService(*sargs[1:])) + print("Invalid object create arguments. Expecting \"href;oid;ifname1;ifname2;bandwidthgbits\" or \"href;oid;ifname1;ifname2\", where ifname is form \"MODULE|PORT\"") + exit(-1) + +cm = CmConnection(args.ip, args.port, args.username, args.password, tls_verify=False) +if not cm.Connect(): + exit(-1) + +if args.list_constellations: + constellations = cm.list_constellations() + for constellation in constellations: + print("Constellation:", constellation.constellation_id) + for if_name in constellation.ifnames(): + print(f" {if_name}") + +if args.show_constellation_by_hub_name: + constellation = cm.get_constellation_by_hub_name(args.show_constellation_by_hub_name) + if constellation: + print(f"Constellation: {constellation.constellation_id}, traffic-mode: {constellation.traffic_mode}") + for if_name in constellation.ifnames(): + print(f" {if_name}") + +if args.create_connection: + tf_service = cli_create_string_to_tf_service(args.create_connection) + connection = Connection(from_tf_service=tf_service) + created_service = cm.create_connection(connection) + if created_service: + print(f"Created {created_service} for {connection}") + else: + print(f"Failed to create {connection}") + +if args.modify_connection: + href, tf_service = cli_modify_string_to_tf_service(args.modify_connection) + mc_args = args.modify_connection.split(";") + connection = Connection(from_tf_service=tf_service) + result = cm.update_connection(href, connection) + if result: + print(f"Updated {href} for {connection}") + else: + print(f"Failed to update {href} for {connection}") + +if 
args.show_connection_by_name: + connection = cm.get_connection_by_name(args.show_connection_by_name) + if connection: + print(str(connection)) + +if args.list_connections: + connections = cm.get_connections() + for c in connections: + print(str(c)) + +if args.delete_connection: + was_deleted = cm.delete_connection(args.delete_connection) + if was_deleted: + print(f"Successfully deleted {args.delete_connection}") + else: + print(f"Failed to delete {args.delete_connection}") + +if args.list_transport_capacities: + tcs = cm.get_transport_capacities() + for tc in tcs: + print(str(tc)) + +if args.create_transport_capacity: + tf_service = cli_create_string_to_tf_service(args.create_transport_capacity) + tc = TransportCapacity(from_tf_service=tf_service) + created_service = cm.create_transport_capacity(tc) + if created_service: + print(f"Created {created_service} for {tc}") + else: + print(f"Failed to create {tc}") + +if args.emulate_tf_set_config_service: + eargs = args.emulate_tf_set_config_service.split(";") + if len(eargs) < 5: + print("Mandatory tokens missing for --emulate-tf-set-config-service") + exit(-1) + + hub_module_name, uuid, input_sip, output_sip, capacity_value = eargs[0:5] + capacity_value = int(capacity_value) + config = { + "input_sip": input_sip, + "output_sip": output_sip, + "capacity_value": capacity_value, + "capacity_unit": "gigabit" + } + + constellation = cm.get_constellation_by_hub_name(hub_module_name) + + # Allow testing some of the VTI code before we have CM that has VTI + if len(eargs) > 5 and eargs[5] == "FORCE-VTI-ON": + constellation.traffic_mode = "VTIMode" + + if constellation is None: + print(f"Unable to find constellation for hub-module {hub_module_name}") + exit(-1) + result = tf.set_config_for_service(cm, constellation, uuid, config) + print(f"Emulated SetConfig() for service result: {result}") + if isinstance(result, Exception): + traceback.print_exception(result) diff --git a/src/device/service/drivers/xr/cm/__init__.py 
b/src/device/service/drivers/xr/cm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a --- /dev/null +++ b/src/device/service/drivers/xr/cm/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/device/service/drivers/xr/cm/cm_connection.py b/src/device/service/drivers/xr/cm/cm_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..7e0fc61b72e7028fae00886cea4dcb2f922bfbf4 --- /dev/null +++ b/src/device/service/drivers/xr/cm/cm_connection.py @@ -0,0 +1,389 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import collections.abc +import logging +import json +import time +from typing import Optional, List, Dict, Union +import re +import requests +import urllib3 +from .connection import Connection +from .transport_capacity import TransportCapacity +from .constellation import Constellation + +# https://confluence.infinera.com/display/CR/XR+Network+Service +# https://confluence.infinera.com/pages/viewpage.action?spaceKey=CR&title=XR+Network+Connection+Service#XRNetworkConnectionService-North-boundInterface +# https://bitbucket.infinera.com/projects/XRCM/repos/cm-api/browse/yaml/ncs/v1/ncs.yaml + +LOGGER = logging.getLogger(__name__) + +class ExpiringValue: + def __init__(self, value, expiry): + self.__value = value + self.__expiry = expiry + self.__created = time.monotonic() + + def get_value(self): + return self.__value + + def is_valid_for(self, duration): + if self.__created + self.__expiry >= time.monotonic()+duration: + return True + else: + return False + +class UnexpectedEmptyBody(Exception): + pass + +class HttpResult: + def __init__(self, method: str, url: str, params: Dict[str, any] = None): + self.method = method + self.url = url + self.text = None + self.json = None + self.status_code = None + self.params = params + self.exception = None + + def __str__(self): + status_code = self.status_code if self.status_code is not None else "<not executed>" + return f"{self.method} {self.url} {self.params}, status {status_code}" + + def process_http_response(self, response: requests.Response, permit_empty_body:bool = False): + LOGGER.info(f"process_http_response(): {self.method}: {self.url} qparams={self.params} ==> {response.status_code}") # FIXME: params + self.status_code = response.status_code + if response.content != b'null' and len(response.text): + self.text = response.text + + try: + r_json = json.loads(response.text) + self.json = r_json + except json.JSONDecodeError as json_err: + LOGGER.info(f"{self.method}: {self.url} ==> response json decode error: 
{str(json_err)}") + self.exception = json_err + elif not permit_empty_body: + raise UnexpectedEmptyBody(f"No body in HTTP response for {self.method} {self.url} (status code {response.status_code}") + + def __bool__(self): + # Error codes start at 400, codes below it are successes + return self.status_code is not None and self.text is not None and self.status_code < 400 and self.exception is None + + def is_valid_with_status_ignore_body(self, expected_status_code: int) -> bool: + return self.status_code is not None and self.status_code == expected_status_code and self.exception is None + + def is_valid_json_with_status(self, expected_status_code: int) -> bool: + return bool(self) and self.status_code == expected_status_code and self.json is not None + + def is_valid_json_list_with_status(self, expected_status_code: int, min_entries=-1, max_entries=-1) -> bool: + if not self.is_valid_json_with_status(expected_status_code): + return False + if not isinstance(self.json, collections.abc.Sequence): + return False + + if min_entries >=0 and len(self.json) < min_entries: + return False + + if max_entries >=0 and len(self.json) > max_entries: + return False + return True + + def is_valid_json_obj_with_status(self, expected_status_code: int) -> bool: + if not self.is_valid_json_with_status(expected_status_code): + return False + if not isinstance(self.json, collections.abc.Mapping): + return False + + return True + +class CmConnection: + def __init__(self, address: str, port: int, username: str, password: str, timeout=30, tls_verify=True) -> None: + self.__tls_verify = tls_verify + if not tls_verify: + urllib3.disable_warnings() + + self.__timeout = timeout + self.__username = username + self.__password = password + self.__cm_root = 'https://' + address + ':' + str(port) + self.__access_token = None + + def __perform_request(self, http_result: HttpResult, permit_empty_body: bool, fn, *args, **kwargs): + try: + response = fn(*args, **kwargs) + 
http_result.process_http_response(response, permit_empty_body) + except requests.exceptions.Timeout as e: + LOGGER.info(f"{http_result} ==> timeout") + http_result.exception = e + except Exception as e: # pylint: disable=broad-except + es=str(e) + LOGGER.info(f"{http_result} ==> unexpected exception: {es}") + http_result.exception = e + return http_result + + def __post_w_headers(self, path, data, headers, data_as_json=True) -> HttpResult: + url = self.__cm_root + path + rv = HttpResult("POST", url) + if data_as_json: + self.__perform_request(rv, False, requests.post, url, headers=headers, json=data, timeout=self.__timeout, verify=self.__tls_verify) + else: + self.__perform_request(rv, False, requests.post, url, headers=headers, data=data, timeout=self.__timeout, verify=self.__tls_verify) + return rv + + def __post(self, path, data, data_as_json=True) -> HttpResult: + return self.__post_w_headers(path, data, self.__http_headers(), data_as_json=data_as_json) + + def __put(self, path: str, data: Union[str,Dict[str, any]], data_as_json:bool =True, permit_empty_body:bool =True) -> HttpResult: + url = self.__cm_root + path + rv = HttpResult("PUT", url) + if data_as_json: + self.__perform_request(rv, permit_empty_body, requests.put, url, headers=self.__http_headers(), json=data, timeout=self.__timeout, verify=self.__tls_verify) + else: + self.__perform_request(rv, permit_empty_body, requests.put, url, headers=self.__http_headers(), data=data, timeout=self.__timeout, verify=self.__tls_verify) + return rv + + def __get(self, path, params: Dict[str, any]=None) -> HttpResult: + url = self.__cm_root + path + rv = HttpResult("GET", url, params) + self.__perform_request(rv, False, requests.get, url, headers=self.__http_headers(), timeout=self.__timeout,verify=self.__tls_verify, params=params) + return rv + + def __delete(self, path, data=None) -> HttpResult: + url = self.__cm_root + path + rv = HttpResult("DELETE", url) + self.__perform_request(rv, True, requests.delete, url, 
headers=self.__http_headers(), data=data, timeout=self.__timeout, verify=self.__tls_verify) + return rv + + def __http_headers(self): + self.__ensure_valid_access_token() + if self.__access_token: + return {'Authorization': 'Bearer '+ self.__access_token.get_value()} + else: + return {} + + def __acquire_access_token(self): + path = '/realms/xr-cm/protocol/openid-connect/token' + req = { + "username": self.__username, + "password": self.__password, + "grant_type": "password", + "client_secret": "xr-web-client", + "client_id": "xr-web-client" + } + resp = self.__post_w_headers(path, req, None, data_as_json=False) + # Slightly more verbose check/logging of failures for authentication to help + # diagnose connectivity problems + if resp.status_code is None: + LOGGER.error("Failed to contact authentication API endpoint") + return False + if not resp.is_valid_json_obj_with_status(200): + LOGGER.error(f"Authentication failure, status code {resp.status_code}, data {resp.text}") + return False + if 'access_token' not in resp.json: + LOGGER.error(f"Authentication failure: missing access_token in JSON, status code {resp.status_code}, data {resp.text}") + return False + access_token = resp.json['access_token'] + expires = int(resp.json["expires_in"]) if "expires_in" in resp.json else 0 + LOGGER.info(f"Obtained access token {access_token}, expires in {expires}") + self.__access_token = ExpiringValue(access_token, expires) + return True + + def __ensure_valid_access_token(self): + if not self.__access_token or not self.__access_token.is_valid_for(60): + self.__acquire_access_token() + + def Connect(self) -> bool: + return self.__acquire_access_token() + + def list_constellations(self) -> List[Constellation]: + r = self.__get("/api/v1/ns/xr-networks?content=expanded") + if not r.is_valid_json_list_with_status(200): + return [] + return [Constellation(c) for c in r.json] + + + def get_constellation_by_hub_name(self, hub_module_name: str) -> Optional[Constellation]: + qparams = [ 
+            ('content', 'expanded'),
+            ('q', '{"hubModule.state.module.moduleName": "' + hub_module_name + '"}')
+        ]
+        r = self.__get("/api/v1/ns/xr-networks?content=expanded", params=qparams)
+        if not r.is_valid_json_list_with_status(200, 1, 1):
+            return None
+        return Constellation(r.json[0])
+
+    def get_transport_capacities(self) -> List[TransportCapacity]:
+        r= self.__get("/api/v1/ns/transport-capacities?content=expanded")
+        if not r.is_valid_json_list_with_status(200):
+            return []
+        return [TransportCapacity(from_json=t) for t in r.json]
+
+    def get_transport_capacity_by_name(self, tc_name: str) -> Optional[TransportCapacity]:
+        qparams = [
+            ('content', 'expanded'),
+            ('q', '{"state.name": "' + tc_name + '"}')
+        ]
+        r = self.__get("/api/v1/ns/transport-capacities?content=expanded", params=qparams)
+        # NOTE: condition was previously inverted (returned the object on an
+        # invalid response, where r.json is None and indexing would raise, and
+        # returned None on success). Return the capacity only when the response
+        # is a valid single-element JSON list.
+        if r.is_valid_json_list_with_status(200, 1, 1):
+            return TransportCapacity(from_json=r.json[0])
+        else:
+            return None
+
+    def get_transport_capacity_by_teraflow_uuid(self, uuid: str) -> Optional[TransportCapacity]:
+        return self.get_transport_capacity_by_name(f"TF:{uuid}")
+
+    def create_transport_capacity(self, tc: TransportCapacity) -> Optional[str]:
+        # Create wants a list, so wrap connection to list
+        tc_config = [tc.create_config()]
+        resp = self.__post("/api/v1/ns/transport-capacities", tc_config)
+        if resp.is_valid_json_list_with_status(202, 1, 1) and "href" in resp.json[0]:
+            tc.href = resp.json[0]["href"]
+            LOGGER.info(f"Created transport-capcity {tc}")
+            #LOGGER.info(self.__get(f"/api/v1/ns/transport-capacities{tc.href}?content=expanded"))
+            return tc.href
+        else:
+            return None
+
+    def delete_transport_capacity(self, href: str) -> bool:
+        resp = self.__delete(f"/api/v1/ns/transport-capacities{href}")
+
+        # Returns empty body
+        if resp.is_valid_with_status_ignore_body(202):
+            LOGGER.info(f"Deleted transport-capacity {href=}")
+            return True
+        else:
+            LOGGER.info(f"Deleting transport-capacity {href=} failed, status {resp.status_code}")
+            return False
+
+    def
create_connection(self, connection: Connection) -> Optional[str]: + # Create wants a list, so wrap connection to list + cfg = [connection.create_config()] + + resp = self.__post("/api/v1/ncs/network-connections", cfg) + if resp.is_valid_json_list_with_status(202, 1, 1) and "href" in resp.json[0]: + connection.href = resp.json[0]["href"] + LOGGER.info(f"Created connection {connection}") + return connection.href + else: + LOGGER.error(f"Create failure for connection {connection}, result {resp}") + return None + + def update_connection(self, href: str, connection: Connection, existing_connection: Optional[Connection]=None) -> Optional[str]: + cfg = connection.create_config() + + # Endpoint updates + # Current CM implementation returns 501 (not implemented) for all of these actions + + # CM does not accept endpoint updates properly in same format that is used in initial creation. + # Instead we work around by using more granular APIs. + if "endpoints" in cfg: + del cfg["endpoints"] + if existing_connection is None: + existing_connection = self.get_connection_by_href(href) + ep_deletes, ep_creates, ep_updates = connection.get_endpoint_updates(existing_connection) + #print(ep_deletes) + #print(ep_creates) + #print(ep_updates) + + # Perform deletes + for ep_href in ep_deletes: + resp = self.__delete(f"/api/v1/ncs{ep_href}") + if resp.is_valid_with_status_ignore_body(202): + LOGGER.info(f"update_connection: EP-UPDATE: Deleted connection endpoint {ep_href}") + else: + LOGGER.info(f"update_connection: EP-UPDATE: Failed to delete connection endpoint {ep_href}: {resp}") + + # Update capacities for otherwise similar endpoints + for ep_href, ep_cfg in ep_updates: + resp = self.__put(f"/api/v1/ncs{ep_href}", ep_cfg) + if resp.is_valid_with_status_ignore_body(202): + LOGGER.info(f"update_connection: EP-UPDATE: Updated connection endpoint {ep_href} with {ep_cfg}") + else: + LOGGER.info(f"update_connection: EP-UPDATE: Failed to update connection endpoint {ep_href} with {ep_cfg}: 
{resp}") + + # Perform adds + resp = self.__post(f"/api/v1/ncs{href}/endpoints", ep_creates) + if resp.is_valid_json_list_with_status(202, 1, 1) and "href" in resp.json[0]: + LOGGER.info(f"update_connection: EP-UPDATE: Created connection endpoints {resp.json[0]} with {ep_creates}") + else: + LOGGER.info(f"update_connection: EP-UPDATE: Failed to create connection endpoints {resp.json[0] if resp.json else None} with {ep_creates}: {resp}") + + # Connection update (excluding endpoints) + resp = self.__put(f"/api/v1/ncs{href}", cfg) + # Returns empty body + if resp.is_valid_with_status_ignore_body(202): + LOGGER.info(f"update_connection: Updated connection {connection}") + # Return href used for update to be consisten with create + return href + else: + LOGGER.error(f"update_connection: Update failure for connection {connection}, result {resp}") + return None + + def delete_connection(self, href: str) -> bool: + resp = self.__delete(f"/api/v1/ncs{href}") + #print(resp) + # Returns empty body + if resp.is_valid_with_status_ignore_body(202): + LOGGER.info(f"Deleted connection {href=}") + return True + else: + return False + + # Always does the correct thing, that is update if present, otherwise create + def create_or_update_connection(self, connection: Connection) -> Optional[str]: + existing_connection = self.get_connection_by_name(connection.name) + if existing_connection: + return self.update_connection(existing_connection.href, connection, existing_connection) + else: + return self.create_connection(connection) + + def get_connection_by_name(self, connection_name: str) -> Optional[Connection]: + qparams = [ + ('content', 'expanded'), + ('q', '{"state.name": "' + connection_name + '"}') + ] + r = self.__get("/api/v1/ncs/network-connections", params=qparams) + if r.is_valid_json_list_with_status(200, 1, 1): + return Connection(from_json=r.json[0]) + else: + return None + + def get_connection_by_href(self, href: str) -> Optional[Connection]: + qparams = [ + ('content', 
'expanded'), + ] + r = self.__get(f"/api/v1/ncs{href}", params=qparams) + if r.is_valid_json_obj_with_status(200): + return Connection(from_json=r.json) + else: + return None + + def get_connection_by_teraflow_uuid(self, uuid: str) -> Optional[Connection]: + return self.get_connection_by_name(f"TF:{uuid}") + + def get_connections(self): + r = self.__get("/api/v1/ncs/network-connections?content=expanded") + if r.is_valid_json_list_with_status(200): + return [Connection(from_json=c) for c in r.json] + else: + return [] + + def service_uuid(self, key: str) -> Optional[str]: + service = re.match(r"^/service\[(.+)\]$", key) + if service: + return service.group(1) + else: + return None diff --git a/src/device/service/drivers/xr/cm/connection.py b/src/device/service/drivers/xr/cm/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..e88995842eb0b6266d4d8eb42e2cc3197d89bea1 --- /dev/null +++ b/src/device/service/drivers/xr/cm/connection.py @@ -0,0 +1,179 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Dict, Optional +from dataclasses import dataclass +from .tf_service import TFService +from .utils import make_selector, set_optional_parameter + +class InconsistentVlanConfiguration(Exception): + pass + +@dataclass +class CEndpoint: + module: str + port: str + # Emulated/translated VLAN. May also be a letter + # Only present on TF side, never on gets from CM. + # VLAN is never transmitted to wire on endpoint, it is purely an internal construct + # However VLAN is part of the whole connection + vlan: str + capacity: int + href: Optional[str] + + + def ifname(self) -> str: + if self.vlan is None: + return self.module + "|" + self.port + else: + return self.module + "|" + self.port + "." + self.vlan + + def portname(self) -> str: + return self.module + "|" + self.port + + def __str__(self): + return f"({self.ifname()}, {self.capacity})" + + def create_config(self) -> Dict[str, any]: + cfg = { + # VLAN is intentionally ignored here (None argument below) + "selector": make_selector(self.module, self.port, None) + } + if self.capacity > 0: + cfg["capacity"] = self.capacity + + return cfg + +class ConnectionDeserializationError(Exception): + pass + +class Connection: + def __init__(self, from_json: Optional[Dict[str, any]] = None, from_tf_service: Optional[TFService] = None): + def get_endpoint_mod_aid(endpoint: Dict[str, any]) -> Optional[str]: + try: + return (endpoint["state"]["moduleIf"]["moduleName"], endpoint["state"]["moduleIf"]["clientIfAid"]) + except KeyError: + return None + + def get_endpoint_capacity(endpoint: Dict[str, any]) -> int: + try: + return int(endpoint["state"]["capacity"]) + except KeyError: + return 0 + + if from_json: + try: + config = from_json["config"] + state = from_json["state"] + self.name = state["name"] if "name" in state else None #Name is optional + self.serviceMode = state["serviceMode"] + self.mc = config["mc"] if "mc" in config else None + self.vlan_filter = state["outerVID"] if "outerVID" in state else None + 
self.href = from_json["href"] + + self.endpoints = [] + for ep in from_json["endpoints"]: + ep_mod_aip = get_endpoint_mod_aid(ep) + if ep_mod_aip: + self.endpoints.append(CEndpoint(*ep_mod_aip, None, get_endpoint_capacity(ep), ep["href"])) + self.cm_data = from_json + except KeyError as e: + raise ConnectionDeserializationError(f"Missing mandatory key {str(e)}") from e + elif from_tf_service: + self.href = None + self.name = from_tf_service.name() + self.endpoints = [CEndpoint(mod, port, vlan, from_tf_service.capacity, None) for mod,port,vlan in from_tf_service.get_endpoints_mod_aid_vlan()] + # Service mode guessing has to be AFTER endpoint assigment. + # The heuristic used is perfectly valid in context of TF where we always encode + # VLANs to interface names. Correspondingly cm-cli user has to know + # to use VLANs on low level test APIs when using VTI mode. + self.serviceMode = self.__guess_service_mode_from_emulated_enpoints() + if self.serviceMode == "portMode": + self.vlan_filter = None + self.mc = None + else: + self.vlan_filter = str(self.__guess_vlan_id()) + " " # Needs to be in string format, can contain ranges, regexp is buggy, trailin space is needed for single VLAN + self.mc = "matchOuterVID" + + self.cm_data = None + else: + # May support other initializations in future + raise ConnectionDeserializationError("JSON dict missing") + + def __str__(self): + name = self.name if self.name else "<NO NAME>" + endpoints = ", ".join((str(ep) for ep in self.endpoints)) + return f"name: {name}, id: {self.href}, service-mode: {self.serviceMode}, end-points: [{endpoints}]" + + def __guess_service_mode_from_emulated_enpoints(self): + for ep in self.endpoints: + if ep.vlan is not None: + return "vtiP2pSymmetric" + return "portMode" + + def __guess_vlan_id(self) -> int: + vlans = [] + for ep in self.endpoints: + if ep.vlan is not None and ep.vlan.isnumeric(): + vlans.append(int(ep.vlan)) + if not vlans: + raise InconsistentVlanConfiguration("VLAN ID is not encoded in 
TF interface names for VTI mode service") + else: + for vlan in vlans: + if vlan != vlans[0]: + raise InconsistentVlanConfiguration(f"VLAN configuration must match at both ends of the connection, {vlans[0]} != {vlan}") + return vlans[0] + + def create_config(self) -> Dict[str, any]: + cfg = {} + set_optional_parameter(cfg, "name", self.name) + cfg["serviceMode"] = self.serviceMode + if self.endpoints: + cfg["endpoints"] = [ep.create_config() for ep in self.endpoints] + set_optional_parameter(cfg, "outerVID", self.vlan_filter) + set_optional_parameter(cfg, "mc", self.mc) + #print(cfg) + return cfg + + def get_port_map(self) -> Dict[str, CEndpoint]: + return {ep.portname(): ep for ep in self.endpoints } + + # Type hint has to be string, because future annotations (enclosing class) + # is not yet widely available + def get_endpoint_updates(self, old: Optional['Connection']): # -> Tuple[List[str], List[Dict[str, any], List[Tuple[str, Dict[str, any]]]]]: + new_ports = self.get_port_map() + + if old is None: + return ([], [new_ep.create_config() for new_ep in new_ports.values()], []) + + # Can only compute difference against get from CM, as hrefs are needed + assert old.cm_data is not None + + old_ports = old.get_port_map() + + deletes = [] + creates = [] + updates = [] + for port, old_ep in old_ports.items(): + if port not in new_ports: + assert old_ep.href is not None + deletes.append(old_ep.href) + + for port, new_ep in new_ports.items(): + if port not in old_ports: + creates.append(new_ep.create_config()) + elif old_ports[port].capacity != new_ep.capacity: + updates.append((old_ports[port].href, {"capacity": new_ep.capacity})) + return deletes, creates, updates diff --git a/src/device/service/drivers/xr/cm/constellation.py b/src/device/service/drivers/xr/cm/constellation.py new file mode 100644 index 0000000000000000000000000000000000000000..468cf70b6180080bfa11ea3321aca1af623b73fc --- /dev/null +++ b/src/device/service/drivers/xr/cm/constellation.py @@ -0,0 +1,69 @@ 
+#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring, wildcard-import, unused-wildcard-import
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List
+from .utils import *
+
+class ConstellationDeserializationError(Exception):
+    pass
+
+class Constellation:
+    def __init__(self, from_json=None):
+        if from_json:
+            try:
+                self.constellation_id = from_json["id"]
+                self.__hub_interfaces = []
+                self.__leaf_interfaces = []
+                # NOTE: was "self.__traffic_mode = None" while the branches below
+                # assigned "self.traffic_mode"; the leaf loop then tested the
+                # never-updated mangled name, so the first leaf always overwrote
+                # the hub's traffic mode. Use a single attribute consistently.
+                self.traffic_mode = None
+                # Intentional simplification for Teraflow. Constellation could have
+                # diverse traffic modes, however that does not occur in intended TF usage.
+                if "hubModule" in from_json:
+                    hub = from_json["hubModule"]
+                    self.traffic_mode = hub["state"]["module"]["trafficMode"]
+                    self.__hub_interfaces.extend(get_constellation_module_ifnames(hub))
+                if "leafModules" in from_json:
+                    for leaf in from_json["leafModules"]:
+                        # Only fall back to a leaf's traffic mode when no hub set it.
+                        if not self.traffic_mode:
+                            self.traffic_mode = leaf["state"]["module"]["trafficMode"]
+                        self.__leaf_interfaces.extend(get_constellation_module_ifnames(leaf))
+            except KeyError as e:
+                raise ConstellationDeserializationError(f"Missing mandatory key {str(e)}") from e
+        else:
+            # May support other initializations in future
+            raise ConstellationDeserializationError("JSON dict missing")
+
+        def add_vlan_posfix(ifname, is_hub):
+            if is_hub:
+                # +100 so we don't need to worry about special meanings of VLANs 0 and 1
+                return [f"{ifname}.{vlan+100}" for vlan in range(0,16)]
+            else:
+                return [f"{ifname}.{chr(ord('a') + vlan)}" for vlan in range(0,16)]
+
+        self.__vti_hub_interfaces = []
+        self.__vti_leaf_interfaces = []
+        if self.is_vti_mode():
+            for ifname in self.__hub_interfaces:
+                self.__vti_hub_interfaces.extend(add_vlan_posfix(ifname, True))
+            for ifname in self.__leaf_interfaces:
+                self.__vti_leaf_interfaces.extend(add_vlan_posfix(ifname, False))
+
+    def ifnames(self) -> List[str]:
+        if self.is_vti_mode():
+            return self.__vti_hub_interfaces + self.__vti_leaf_interfaces
+        else:
+            return self.__hub_interfaces + self.__leaf_interfaces
+
+    def is_vti_mode(self) -> bool:
+        return self.traffic_mode != "L1Mode"
diff --git a/src/device/service/drivers/xr/cm/tests/__init__.py b/src/device/service/drivers/xr/cm/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/device/service/drivers/xr/cm/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/device/service/drivers/xr/cm/tests/resources/connections-expanded.json b/src/device/service/drivers/xr/cm/tests/resources/connections-expanded.json new file mode 100644 index 0000000000000000000000000000000000000000..f9f064ea20c3764ad0a5e4d0d3dfb60b468c2556 --- /dev/null +++ b/src/device/service/drivers/xr/cm/tests/resources/connections-expanded.json @@ -0,0 +1,290 @@ +[ + { + "config": { + "name": "FooBar123", + "serviceMode": "portMode" + }, + "endpoints": [ + { + "acs": [], + "config": { + "selector": {} + }, + "href": "/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/230516d0-7e38-44b1-b174-1ba7d4454ee6", + "id": "230516d0-7e38-44b1-b174-1ba7d4454ee6", + "parentId": "4505d5d3-b2f3-40b8-8ec2-4a5b28523c03", + "rt": [ + "cm.network-connection.endpoint" + ], + "state": { + "hostPort": { + "chassisId": "192.168.101.1", + "chassisIdSubtype": "networkAddress", + "name": "", + "portDescr": "et-1/0/0:0", + "portId": "et-1/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:11:01", + "sysName": "PaloAlto" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100, + "currentRole": "leaf", + "macAddress": "00:0B:F8:00:01:01", + "moduleId": "555a0f6e-285d-4a97-70f2-9fa4201d422d", + "moduleName": "XR LEAF 1", + "serialNumber": "00000000B" + } + } + }, + { + "acs": [], + "config": { + "selector": {} + }, + "href": "/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/1d58ba8f-4d51-4213-83e1-97a0e0bdd388", + "id": 
"1d58ba8f-4d51-4213-83e1-97a0e0bdd388", + "parentId": "4505d5d3-b2f3-40b8-8ec2-4a5b28523c03", + "rt": [ + "cm.network-connection.endpoint" + ], + "state": { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "name": "", + "portDescr": "et-1/0/0:0", + "portId": "et-1/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:11", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100, + "currentRole": "hub", + "macAddress": "00:0B:F8:00:00:01", + "moduleId": "e1d3a030-4f19-4efc-50c0-5a48609ad356", + "moduleName": "XR HUB 1", + "serialNumber": "000000009" + } + } + } + ], + "href": "/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03", + "id": "4505d5d3-b2f3-40b8-8ec2-4a5b28523c03", + "lcs": [ + { + "config": { + "clientAid": "XR-T1", + "direction": "txRx", + "dscgAid": "XR-L1-C1-DSCG1", + "moduleId": "555a0f6e-285d-4a97-70f2-9fa4201d422d" + }, + "href": "/lcs/5872e191-774f-4ae9-841a-ea743be01973", + "id": "5872e191-774f-4ae9-841a-ea743be01973", + "parentIds": [ + "4505d5d3-b2f3-40b8-8ec2-4a5b28523c03" + ], + "rt": [ + "cm.network-connection.local-connection" + ], + "state": { + "clientAid": "XR-T1", + "colId": 1, + "direction": "txRx", + "dscgAid": "XR-L1-C1-DSCG1", + "lcAid": "XR-T1,XR-L1-C1-DSCG1-1-ODUji-1", + "lineAid": "XR-L1-C1-DSCG1-1-ODUji-1", + "macAddress": "00:0B:F8:00:01:01", + "moduleId": "555a0f6e-285d-4a97-70f2-9fa4201d422d", + "remoteClientAid": "XR-T1", + "remoteModuleId": "00:0B:F8:00:00:01" + } + }, + { + "config": { + "clientAid": "XR-T1", + "direction": "txRx", + "dscgAid": "XR-L1-C1-DSCG1", + "moduleId": "e1d3a030-4f19-4efc-50c0-5a48609ad356" + }, + "href": "/lcs/c96a7954-2d12-48aa-8cfb-6a3cf5566cb0", + "id": "c96a7954-2d12-48aa-8cfb-6a3cf5566cb0", + "parentIds": [ + "4505d5d3-b2f3-40b8-8ec2-4a5b28523c03" + ], + "rt": [ + "cm.network-connection.local-connection" + ], + "state": { + "clientAid": "XR-T1", + "colId": 1, + 
"direction": "txRx", + "dscgAid": "XR-L1-C1-DSCG1", + "lcAid": "XR-T1,XR-L1-C1-DSCG1-1-ODUji-1", + "lineAid": "XR-L1-C1-DSCG1-1-ODUji-1", + "macAddress": "00:0B:F8:00:00:01", + "moduleId": "e1d3a030-4f19-4efc-50c0-5a48609ad356", + "remoteClientAid": "XR-T1", + "remoteModuleId": "00:0B:F8:00:01:01" + } + } + ], + "rt": [ + "cm.network-connection" + ], + "state": { + "createdBy": "host", + "lifecycleState": "configured", + "name": "FooBar123", + "serviceMode": "portMode" + } + }, + { + "config": { + "serviceMode": "portMode" + }, + "endpoints": [ + { + "acs": [], + "config": { + "selector": {} + }, + "href": "/network-connections/138f0cc0-3dc6-4195-97c0-2cbed5fd59ba/endpoints/59027aa4-858b-4d62-86b9-0f2d3738619c", + "id": "59027aa4-858b-4d62-86b9-0f2d3738619c", + "parentId": "138f0cc0-3dc6-4195-97c0-2cbed5fd59ba", + "rt": [ + "cm.network-connection.endpoint" + ], + "state": { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "name": "", + "portDescr": "et-2/0/0:0", + "portId": "et-2/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:21", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100, + "currentRole": "hub", + "macAddress": "00:0B:F8:00:00:02", + "moduleId": "b6577725-9939-4b60-6be9-603bd210cde1", + "moduleName": "XR HUB 2", + "serialNumber": "00000000A" + } + } + }, + { + "acs": [], + "config": { + "selector": {} + }, + "href": "/network-connections/138f0cc0-3dc6-4195-97c0-2cbed5fd59ba/endpoints/b2fc53e2-41a1-4fe5-8f03-f91a11e52661", + "id": "b2fc53e2-41a1-4fe5-8f03-f91a11e52661", + "parentId": "138f0cc0-3dc6-4195-97c0-2cbed5fd59ba", + "rt": [ + "cm.network-connection.endpoint" + ], + "state": { + "hostPort": { + "chassisId": "192.168.101.3", + "chassisIdSubtype": "networkAddress", + "name": "", + "portDescr": "et-3/0/0:0", + "portId": "et-3/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:13:01", + 
"sysName": "Sunnnyvale" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100, + "currentRole": "leaf", + "macAddress": "00:0B:F8:00:01:03", + "moduleId": "00c6875e-21bf-4934-51c7-125ebd8d0559", + "moduleName": "XR LEAF 3", + "serialNumber": "00000000D" + } + } + } + ], + "href": "/network-connections/138f0cc0-3dc6-4195-97c0-2cbed5fd59ba", + "id": "138f0cc0-3dc6-4195-97c0-2cbed5fd59ba", + "lcs": [ + { + "config": { + "clientAid": "XR-T1", + "direction": "txRx", + "dscgAid": "XR-L1-C1-DSCG1", + "moduleId": "00c6875e-21bf-4934-51c7-125ebd8d0559" + }, + "href": "/lcs/0f8fe422-4f4e-4e78-8489-ee85031e083c", + "id": "0f8fe422-4f4e-4e78-8489-ee85031e083c", + "parentIds": [ + "138f0cc0-3dc6-4195-97c0-2cbed5fd59ba" + ], + "rt": [ + "cm.network-connection.local-connection" + ], + "state": { + "clientAid": "XR-T1", + "colId": 1, + "direction": "txRx", + "dscgAid": "XR-L1-C1-DSCG1", + "lcAid": "XR-T1,XR-L1-C1-DSCG1-1-ODUji-1", + "lineAid": "XR-L1-C1-DSCG1-1-ODUji-1", + "macAddress": "00:0B:F8:00:01:03", + "moduleId": "00c6875e-21bf-4934-51c7-125ebd8d0559", + "remoteClientAid": "XR-T1", + "remoteModuleId": "00:0B:F8:00:00:02" + } + }, + { + "config": { + "clientAid": "XR-T1", + "direction": "txRx", + "dscgAid": "XR-L1-C1-DSCG1", + "moduleId": "b6577725-9939-4b60-6be9-603bd210cde1" + }, + "href": "/lcs/7c769310-22b5-4d4c-8e9d-386a8083c611", + "id": "7c769310-22b5-4d4c-8e9d-386a8083c611", + "parentIds": [ + "138f0cc0-3dc6-4195-97c0-2cbed5fd59ba" + ], + "rt": [ + "cm.network-connection.local-connection" + ], + "state": { + "clientAid": "XR-T1", + "colId": 1, + "direction": "txRx", + "dscgAid": "XR-L1-C1-DSCG1", + "lcAid": "XR-T1,XR-L1-C1-DSCG1-1-ODUji-1", + "lineAid": "XR-L1-C1-DSCG1-1-ODUji-1", + "macAddress": "00:0B:F8:00:00:02", + "moduleId": "b6577725-9939-4b60-6be9-603bd210cde1", + "remoteClientAid": "XR-T1", + "remoteModuleId": "00:0B:F8:00:01:03" + } + } + ], + "rt": [ + "cm.network-connection" + ], + "state": { + "createdBy": 
"host", + "lifecycleState": "configured", + "serviceMode": "portMode" + } + } +] \ No newline at end of file diff --git a/src/device/service/drivers/xr/cm/tests/resources/constellation-by-name-hub1.json b/src/device/service/drivers/xr/cm/tests/resources/constellation-by-name-hub1.json new file mode 100644 index 0000000000000000000000000000000000000000..061d6453edebc764a96fe3eaeace1168200b1a20 --- /dev/null +++ b/src/device/service/drivers/xr/cm/tests/resources/constellation-by-name-hub1.json @@ -0,0 +1,388 @@ +[ + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd", + "hubModule": { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/hubModule", + "id": "519cc31f-b736-4e4c-b78d-600562d92911", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.hubModule" + ], + "state": { + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:0", + "portId": "et-1/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:11", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + }, + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:1", + "portId": "et-1/0/0:1", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:12", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T2", + "clientIfColId": 2, + "clientIfPortSpeed": 100 + } + }, + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:2", + "portId": "et-1/0/0:2", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:13", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T3", + "clientIfColId": 3, + "clientIfPortSpeed": 100 + } + }, + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + 
"portDescr": "et-1/0/0:3", + "portId": "et-1/0/0:3", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:14", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T4", + "clientIfColId": 4, + "clientIfPortSpeed": 100 + } + } + ], + "lifecycleState": "configured", + "module": { + "baudRate": 50, + "capacity": 400, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "hub", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:00:01", + "modulation": "16QAM", + "moduleId": "d859de3c-c463-4be5-7a8d-a198275f10f4", + "moduleName": "XR HUB 1", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "000000009", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + }, + "id": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "leafModules": [ + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/leafModules/7e9da66b-8bf8-4eea-b4a7-045e5ba3bfd8", + "id": "7e9da66b-8bf8-4eea-b4a7-045e5ba3bfd8", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.leafModule" + ], + "state": { + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:0", + "portId": "et-1/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:11:01", + "sysName": "PaloAlto" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "lifecycleState": "configured", + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:01", + "maxAllowedDSCs": 4, + "modulation": "16QAM", + "moduleId": "7c33d7d0-4f7b-4525-5d57-58589adbd47c", + "moduleName": "XR LEAF 
1", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "00000000B", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + }, + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/leafModules/7473b336-ef92-4508-b260-c096d05e4943", + "id": "7473b336-ef92-4508-b260-c096d05e4943", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.leafModule" + ], + "state": { + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.2", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-2/0/0:0", + "portId": "et-2/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:12:01", + "sysName": "Cupertino" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "lifecycleState": "configured", + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:02", + "maxAllowedDSCs": 4, + "modulation": "16QAM", + "moduleId": "d68a6b4e-03e4-4c89-5ad5-c5e782325e40", + "moduleName": "XR LEAF 2", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "00000000C", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + } + ], + "reachableModules": [ + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/reachableModules/58785cd9-c642-43e4-a8b5-6d136acd8ae5", + "id": "58785cd9-c642-43e4-a8b5-6d136acd8ae5", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.reachableModule" + ], + "state": { + "discoveredTime": "2022-06-28T09:04:08Z", + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.3", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-3/0/0:0", + "portId": "et-3/0/0:0", + "portIdSubtype": 
"interfaceName", + "portSourceMAC": "58:00:BB:00:13:01", + "sysName": "Sunnnyvale" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 193000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:03", + "modulation": "16QAM", + "moduleId": "572b2d8a-8d0b-40a0-5823-e53041ca2194", + "moduleName": "XR LEAF 3", + "ncoFrequency": 0, + "operatingFrequency": 193000000, + "roleStatus": "ready", + "serialNumber": "00000000D", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + }, + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/reachableModules/be85d276-6f30-4c7b-9f63-de8679dfab85", + "id": "be85d276-6f30-4c7b-9f63-de8679dfab85", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.reachableModule" + ], + "state": { + "discoveredTime": "2022-06-28T09:04:05Z", + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:0", + "portId": "et-1/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:11:01", + "sysName": "PaloAlto" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:01", + "modulation": "16QAM", + "moduleId": "7c33d7d0-4f7b-4525-5d57-58589adbd47c", + "moduleName": "XR LEAF 1", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "00000000B", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": 
-6.4 + } + } + }, + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/reachableModules/212cf331-c133-4321-8e74-023549b9afee", + "id": "212cf331-c133-4321-8e74-023549b9afee", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.reachableModule" + ], + "state": { + "discoveredTime": "2022-06-28T09:04:06Z", + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.2", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-2/0/0:0", + "portId": "et-2/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:12:01", + "sysName": "Cupertino" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:02", + "modulation": "16QAM", + "moduleId": "d68a6b4e-03e4-4c89-5ad5-c5e782325e40", + "moduleName": "XR LEAF 2", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "00000000C", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + } + ], + "rt": [ + "cm.xr-network" + ], + "state": { + "constellationFrequency": 192000000, + "controlLinks": [ + { + "conState": "active", + "destinationModuleId": "7c33d7d0-4f7b-4525-5d57-58589adbd47c", + "lastConStateChange": "2022-06-28T09:04:05Z", + "sourceModuleId": "d859de3c-c463-4be5-7a8d-a198275f10f4" + }, + { + "conState": "active", + "destinationModuleId": "d859de3c-c463-4be5-7a8d-a198275f10f4", + "lastConStateChange": "2022-06-28T09:04:05Z", + "sourceModuleId": "7c33d7d0-4f7b-4525-5d57-58589adbd47c" + }, + { + "conState": "active", + "destinationModuleId": "d68a6b4e-03e4-4c89-5ad5-c5e782325e40", + "lastConStateChange": "2022-06-28T09:04:06Z", + "sourceModuleId": "d859de3c-c463-4be5-7a8d-a198275f10f4" 
+ }, + { + "conState": "active", + "destinationModuleId": "d859de3c-c463-4be5-7a8d-a198275f10f4", + "lastConStateChange": "2022-06-28T09:04:05Z", + "sourceModuleId": "d68a6b4e-03e4-4c89-5ad5-c5e782325e40" + } + ], + "lifecycleState": "configured", + "modulation": "16QAM" + } + } +] diff --git a/src/device/service/drivers/xr/cm/tests/resources/constellations-expanded.json b/src/device/service/drivers/xr/cm/tests/resources/constellations-expanded.json new file mode 100644 index 0000000000000000000000000000000000000000..cfe310f4b1340395d660f775d34086591a8e8827 --- /dev/null +++ b/src/device/service/drivers/xr/cm/tests/resources/constellations-expanded.json @@ -0,0 +1,662 @@ +[ + { + "href": "/xr-networks/6774cc4e-b0b1-43a1-923f-80fb1bec094b", + "hubModule": { + "href": "/xr-networks/6774cc4e-b0b1-43a1-923f-80fb1bec094b/hubModule", + "id": "353563a1-895f-4110-abec-8f59ffb5ecc7", + "parentId": "6774cc4e-b0b1-43a1-923f-80fb1bec094b", + "rt": [ + "cm.xr-network.hubModule" + ], + "state": { + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-2/0/0:0", + "portId": "et-2/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:21", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + }, + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-2/0/0:1", + "portId": "et-2/0/0:1", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:22", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T2", + "clientIfColId": 2, + "clientIfPortSpeed": 100 + } + }, + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-2/0/0:2", + "portId": "et-2/0/0:2", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:23", + "sysName": "SanJose" + }, + "moduleIf": { + 
"clientIfAid": "XR-T3", + "clientIfColId": 3, + "clientIfPortSpeed": 100 + } + }, + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-2/0/0:3", + "portId": "et-2/0/0:3", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:24", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T4", + "clientIfColId": 4, + "clientIfPortSpeed": 100 + } + } + ], + "lifecycleState": "configured", + "module": { + "baudRate": 50, + "capacity": 400, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 193000000, + "currentRole": "hub", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:00:02", + "modulation": "16QAM", + "moduleId": "3a2b5cfe-6265-4b68-549d-340c58363b85", + "moduleName": "XR HUB 2", + "ncoFrequency": 0, + "operatingFrequency": 193000000, + "roleStatus": "ready", + "serialNumber": "00000000A", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + }, + "id": "6774cc4e-b0b1-43a1-923f-80fb1bec094b", + "leafModules": [ + { + "href": "/xr-networks/6774cc4e-b0b1-43a1-923f-80fb1bec094b/leafModules/e659ad54-9e6d-492c-ac56-09b3b681c5ed", + "id": "e659ad54-9e6d-492c-ac56-09b3b681c5ed", + "parentId": "6774cc4e-b0b1-43a1-923f-80fb1bec094b", + "rt": [ + "cm.xr-network.leafModule" + ], + "state": { + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.3", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-3/0/0:0", + "portId": "et-3/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:13:01", + "sysName": "Sunnnyvale" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "lifecycleState": "configured", + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 193000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + 
"frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:03", + "modulation": "16QAM", + "moduleId": "572b2d8a-8d0b-40a0-5823-e53041ca2194", + "moduleName": "XR LEAF 3", + "ncoFrequency": 0, + "operatingFrequency": 193000000, + "roleStatus": "ready", + "serialNumber": "00000000D", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + } + ], + "reachableModules": [ + { + "href": "/xr-networks/6774cc4e-b0b1-43a1-923f-80fb1bec094b/reachableModules/ede4b98b-a6a7-48fa-89c0-b981e7d4a98c", + "id": "ede4b98b-a6a7-48fa-89c0-b981e7d4a98c", + "parentId": "6774cc4e-b0b1-43a1-923f-80fb1bec094b", + "rt": [ + "cm.xr-network.reachableModule" + ], + "state": { + "discoveredTime": "2022-06-28T09:04:08Z", + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.3", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-3/0/0:0", + "portId": "et-3/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:13:01", + "sysName": "Sunnnyvale" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 193000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:03", + "modulation": "16QAM", + "moduleId": "572b2d8a-8d0b-40a0-5823-e53041ca2194", + "moduleName": "XR LEAF 3", + "ncoFrequency": 0, + "operatingFrequency": 193000000, + "roleStatus": "ready", + "serialNumber": "00000000D", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + }, + { + "href": "/xr-networks/6774cc4e-b0b1-43a1-923f-80fb1bec094b/reachableModules/004ffffb-290f-45d8-90bf-4e0c914eb39c", + "id": "004ffffb-290f-45d8-90bf-4e0c914eb39c", + "parentId": "6774cc4e-b0b1-43a1-923f-80fb1bec094b", + "rt": [ + "cm.xr-network.reachableModule" + ], + "state": { + "discoveredTime": "2022-06-28T09:04:05Z", + "endpoints": [ + { + 
"hostPort": { + "chassisId": "192.168.101.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:0", + "portId": "et-1/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:11:01", + "sysName": "PaloAlto" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:01", + "modulation": "16QAM", + "moduleId": "7c33d7d0-4f7b-4525-5d57-58589adbd47c", + "moduleName": "XR LEAF 1", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "00000000B", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + } + ], + "rt": [ + "cm.xr-network" + ], + "state": { + "constellationFrequency": 193000000, + "controlLinks": [ + { + "conState": "active", + "destinationModuleId": "3a2b5cfe-6265-4b68-549d-340c58363b85", + "lastConStateChange": "2022-06-28T09:04:07Z", + "sourceModuleId": "572b2d8a-8d0b-40a0-5823-e53041ca2194" + }, + { + "conState": "active", + "destinationModuleId": "572b2d8a-8d0b-40a0-5823-e53041ca2194", + "lastConStateChange": "2022-06-28T09:04:08Z", + "sourceModuleId": "3a2b5cfe-6265-4b68-549d-340c58363b85" + } + ], + "lifecycleState": "configured", + "modulation": "16QAM" + } + }, + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd", + "hubModule": { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/hubModule", + "id": "519cc31f-b736-4e4c-b78d-600562d92911", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.hubModule" + ], + "state": { + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:0", + "portId": "et-1/0/0:0", + "portIdSubtype": 
"interfaceName", + "portSourceMAC": "58:00:BB:00:00:11", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + }, + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:1", + "portId": "et-1/0/0:1", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:12", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T2", + "clientIfColId": 2, + "clientIfPortSpeed": 100 + } + }, + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:2", + "portId": "et-1/0/0:2", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:13", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T3", + "clientIfColId": 3, + "clientIfPortSpeed": 100 + } + }, + { + "hostPort": { + "chassisId": "192.168.100.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:3", + "portId": "et-1/0/0:3", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:00:14", + "sysName": "SanJose" + }, + "moduleIf": { + "clientIfAid": "XR-T4", + "clientIfColId": 4, + "clientIfPortSpeed": 100 + } + } + ], + "lifecycleState": "configured", + "module": { + "baudRate": 50, + "capacity": 400, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "hub", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:00:01", + "modulation": "16QAM", + "moduleId": "d859de3c-c463-4be5-7a8d-a198275f10f4", + "moduleName": "XR HUB 1", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "000000009", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + }, + "id": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "leafModules": [ + { + "href": 
"/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/leafModules/7e9da66b-8bf8-4eea-b4a7-045e5ba3bfd8", + "id": "7e9da66b-8bf8-4eea-b4a7-045e5ba3bfd8", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.leafModule" + ], + "state": { + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:0", + "portId": "et-1/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:11:01", + "sysName": "PaloAlto" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "lifecycleState": "configured", + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:01", + "maxAllowedDSCs": 4, + "modulation": "16QAM", + "moduleId": "7c33d7d0-4f7b-4525-5d57-58589adbd47c", + "moduleName": "XR LEAF 1", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "00000000B", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + }, + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/leafModules/7473b336-ef92-4508-b260-c096d05e4943", + "id": "7473b336-ef92-4508-b260-c096d05e4943", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.leafModule" + ], + "state": { + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.2", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-2/0/0:0", + "portId": "et-2/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:12:01", + "sysName": "Cupertino" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "lifecycleState": "configured", + "module": { + "baudRate": 50, + "capacity": 100, + 
"clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:02", + "maxAllowedDSCs": 4, + "modulation": "16QAM", + "moduleId": "d68a6b4e-03e4-4c89-5ad5-c5e782325e40", + "moduleName": "XR LEAF 2", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "00000000C", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + } + ], + "reachableModules": [ + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/reachableModules/58785cd9-c642-43e4-a8b5-6d136acd8ae5", + "id": "58785cd9-c642-43e4-a8b5-6d136acd8ae5", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.reachableModule" + ], + "state": { + "discoveredTime": "2022-06-28T09:04:08Z", + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.3", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-3/0/0:0", + "portId": "et-3/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:13:01", + "sysName": "Sunnnyvale" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 193000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:03", + "modulation": "16QAM", + "moduleId": "572b2d8a-8d0b-40a0-5823-e53041ca2194", + "moduleName": "XR LEAF 3", + "ncoFrequency": 0, + "operatingFrequency": 193000000, + "roleStatus": "ready", + "serialNumber": "00000000D", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + }, + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/reachableModules/be85d276-6f30-4c7b-9f63-de8679dfab85", + "id": "be85d276-6f30-4c7b-9f63-de8679dfab85", + 
"parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.reachableModule" + ], + "state": { + "discoveredTime": "2022-06-28T09:04:05Z", + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.1", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-1/0/0:0", + "portId": "et-1/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:11:01", + "sysName": "PaloAlto" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "leaf", + "fiberConnectionMode": "dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:01", + "modulation": "16QAM", + "moduleId": "7c33d7d0-4f7b-4525-5d57-58589adbd47c", + "moduleName": "XR LEAF 1", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "00000000B", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + }, + { + "href": "/xr-networks/233e169b-5d88-481d-bfe2-c909a2a859dd/reachableModules/212cf331-c133-4321-8e74-023549b9afee", + "id": "212cf331-c133-4321-8e74-023549b9afee", + "parentId": "233e169b-5d88-481d-bfe2-c909a2a859dd", + "rt": [ + "cm.xr-network.reachableModule" + ], + "state": { + "discoveredTime": "2022-06-28T09:04:06Z", + "endpoints": [ + { + "hostPort": { + "chassisId": "192.168.101.2", + "chassisIdSubtype": "networkAddress", + "portDescr": "et-2/0/0:0", + "portId": "et-2/0/0:0", + "portIdSubtype": "interfaceName", + "portSourceMAC": "58:00:BB:00:12:01", + "sysName": "Cupertino" + }, + "moduleIf": { + "clientIfAid": "XR-T1", + "clientIfColId": 1, + "clientIfPortSpeed": 100 + } + } + ], + "module": { + "baudRate": 50, + "capacity": 100, + "clientPortMode": "ethernet", + "configuredRole": "auto", + "constellationFrequency": 192000000, + "currentRole": "leaf", + "fiberConnectionMode": 
"dual", + "frequencyCtrl": "xr", + "macAddress": "00:0B:F8:00:01:02", + "modulation": "16QAM", + "moduleId": "d68a6b4e-03e4-4c89-5ad5-c5e782325e40", + "moduleName": "XR LEAF 2", + "ncoFrequency": 0, + "operatingFrequency": 192000000, + "roleStatus": "ready", + "serialNumber": "00000000C", + "trafficMode": "L1Mode", + "txPowerTargetPerDsc": -6.4 + } + } + } + ], + "rt": [ + "cm.xr-network" + ], + "state": { + "constellationFrequency": 192000000, + "controlLinks": [ + { + "conState": "active", + "destinationModuleId": "7c33d7d0-4f7b-4525-5d57-58589adbd47c", + "lastConStateChange": "2022-06-28T09:04:05Z", + "sourceModuleId": "d859de3c-c463-4be5-7a8d-a198275f10f4" + }, + { + "conState": "active", + "destinationModuleId": "d859de3c-c463-4be5-7a8d-a198275f10f4", + "lastConStateChange": "2022-06-28T09:04:05Z", + "sourceModuleId": "7c33d7d0-4f7b-4525-5d57-58589adbd47c" + }, + { + "conState": "active", + "destinationModuleId": "d68a6b4e-03e4-4c89-5ad5-c5e782325e40", + "lastConStateChange": "2022-06-28T09:04:06Z", + "sourceModuleId": "d859de3c-c463-4be5-7a8d-a198275f10f4" + }, + { + "conState": "active", + "destinationModuleId": "d859de3c-c463-4be5-7a8d-a198275f10f4", + "lastConStateChange": "2022-06-28T09:04:05Z", + "sourceModuleId": "d68a6b4e-03e4-4c89-5ad5-c5e782325e40" + } + ], + "lifecycleState": "configured", + "modulation": "16QAM" + } + } +] \ No newline at end of file diff --git a/src/device/service/drivers/xr/cm/tests/resources/transport-capacities-swagger-example.json b/src/device/service/drivers/xr/cm/tests/resources/transport-capacities-swagger-example.json new file mode 100644 index 0000000000000000000000000000000000000000..f4f1b00a30aa60d15962eee5fd7471a978c0ee67 --- /dev/null +++ b/src/device/service/drivers/xr/cm/tests/resources/transport-capacities-swagger-example.json @@ -0,0 +1,165 @@ +[ + { + "href": "/transport-capacities/6ce3aa86-2685-44b0-9f86-49e6a6c991a8", + "rt": [ + "cm.transport-capacity" + ], + "id": "6ce3aa86-2685-44b0-9f86-49e6a6c991a8", + 
"config": { + "name": "Transport capacity service example", + "capacityMode": "dedicatedDownlinkSymmetric" + }, + "state": { + "name": "Transport capacity service example", + "capacityMode": "dedicatedDownlinkSymmetric", + "lifecycleState": "configured", + "labels": [] + }, + "endpoints": [ + { + "href": "/transport-capacities/6ce3aa86-2685-44b0-9f86-49e6a6c991a8/endpoints/4511bc3d-617b-4757-9f4c-41bc7d8912eb", + "rt": [ + "cm.transport-capacity.hub" + ], + "id": "4511bc3d-617b-4757-9f4c-41bc7d8912eb", + "parentId": "6ce3aa86-2685-44b0-9f86-49e6a6c991a8", + "config": { + "capacity": 100, + "selector": { + "hostPortSelector": { + "chassisIdSubtype": "macAddress", + "chassisId": "28:c0:da:3e:3e:40", + "portIdSubtype": "interfaceName", + "portId": "et-1/0/1:2" + } + } + }, + "state": { + "capacity": 100, + "hostPort": { + "chassisIdSubtype": "macAddress", + "chassisId": "28:c0:da:3e:3e:40", + "portIdSubtype": "interfaceName", + "portId": "et-1/0/0:0", + "portSourceMAC": "da:3d:c2:4c:55:40", + "portDescr": "et-1/0/0:0" + }, + "moduleIf": { + "moduleId": "18e47620-8848-4c7e-710f-05c668478c57", + "moduleName": "XR Device", + "moduleMAC": "46:00:84:A0:0C:00", + "moduleSerialNumber": "12345678900", + "moduleCurrentRole": "hub", + "moduleClientIfColId": 1, + "clientIfAid": "XR T1", + "moduleClientIfPortSpeed": 100 + }, + "lifecycleState": "configured", + "labels": [] + } + }, + { + "href": "/transport-capacities/6ce3aa86-2685-44b0-9f86-49e6a6c991a8/endpoints/35e92b25-a682-4805-964a-6ce893a7aa56", + "rt": [ + "cm.transport-capacity.leaf" + ], + "id": "35e92b25-a682-4805-964a-6ce893a7aa56", + "parentId": "6ce3aa86-2685-44b0-9f86-49e6a6c991a8", + "config": { + "capacity": 100, + "selector": { + "hostPortSelector": { + "chassisIdSubtype": "macAddress", + "chassisId": "00:99:F8:2c:01:01", + "portIdSubtype": "interfaceName", + "portId": "et-1/0/0:0" + } + } + }, + "state": { + "capacity": 100, + "hostPort": { + "chassisIdSubtype": "macAddress", + "chassisId": "00:99:F8:2c:01:01", 
+ "portIdSubtype": "interfaceName", + "portId": "et-1/0/0:0", + "portSourceMAC": "da:3d:c2:4c:55:40", + "portDescr": "et-1/0/0:0" + }, + "moduleIf": { + "moduleId": "23ffd75e-1a30-11ec-9621-0242ac130002", + "moduleName": "XR Device 2", + "moduleMAC": "46:00:84:A0:0C:02", + "moduleSerialNumber": "12345678902", + "moduleCurrentRole": "Leaf", + "moduleClientIfColId": 2, + "clientIfAid": "XR T1", + "moduleClientIfPortSpeed": 100 + }, + "lifecycleState": "configured", + "labels": [] + } + } + ], + "capacity-links": [ + { + "href": "/capacity-links/d9580972-7a72-43e7-91d9-5473251040ca", + "rt": [ + "cm.capacity-link" + ], + "id": "d9580972-7a72-43e7-91d9-5473251040ca", + "parentId": "6ce3aa86-2685-44b0-9f86-49e6a6c991a8", + "config": { + "directionality": "biDir", + "hubModule": { + "moduleId": "18e47620-8848-4c7e-710f-05c668478c57", + "dscgShared": false, + "dscs": [ + 7, + 5, + 3, + 1 + ] + }, + "leafModule": { + "moduleId": "23ffd75e-1a30-11ec-9621-0242ac130002", + "dscgShared": false, + "dscs": [ + 3, + 1, + 2, + 4 + ] + } + }, + "state": { + "directionality": "biDir", + "hubModule": { + "moduleId": "18e47620-8848-4c7e-710f-05c668478c57", + "dscgId": "552d4e35-c7fc-4fdf-bb31-1688f926582e", + "dscgShared": false, + "dscs": [ + 7, + 5, + 3, + 1 + ], + "lifecycleState": "configured" + }, + "leafModule": { + "moduleId": "23ffd75e-1a30-11ec-9621-0242ac130002", + "dscgId": "831884a0-fac7-4f1a-8c0d-74f82498921c", + "dscgShared": false, + "dscs": [ + 3, + 1, + 2, + 4 + ], + "lifecycleState": "configured" + } + } + } + ] + } + ] \ No newline at end of file diff --git a/src/device/service/drivers/xr/cm/tests/test_cm_connection.py b/src/device/service/drivers/xr/cm/tests/test_cm_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..60cbeac06abf312335abb693f4cd190281fffa7b --- /dev/null +++ b/src/device/service/drivers/xr/cm/tests/test_cm_connection.py @@ -0,0 +1,81 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, 
logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import os +import requests_mock + +#from ..tf_service import TFService +from ..cm_connection import CmConnection + +access_token = r'{"access_token":"eyI3...","expires_in":3600,"refresh_expires_in":0,"refresh_token":"ey...","token_type":"Bearer","not-before-policy":0,"session_state":"f6e235c4-4ca4-4258-bede-4f2b7125adfb","scope":"profile email offline_access"}' + +resources = os.path.join(os.path.dirname(os.path.abspath(inspect.stack()[0][1])), "resources") +with open(os.path.join(resources, "constellations-expanded.json"), "r", encoding="UTF-8") as f: + res_constellations = f.read() +with open(os.path.join(resources, "constellation-by-name-hub1.json"), "r", encoding="UTF-8") as f: + res_constellation_by_name_hub1 = f.read() + +def mock_cm_connectivity(): + m = requests_mock.Mocker() + m.post('https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token', text=access_token) + return m + +def test_cmc_connect(): + # Valid access token + with requests_mock.Mocker() as m: + m.post('https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token', text=access_token) + cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False) + assert cm.Connect() + + # Valid JSON but no access token + with requests_mock.Mocker() as m: + 
m.post('https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token', text=r'{"a": "b"}') + cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False) + assert not cm.Connect() + + # Invalid JSON + with requests_mock.Mocker() as m: + m.post('https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token', text=r'}}}') + cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False) + assert not cm.Connect() + + with requests_mock.Mocker() as m: + # No mock present for the destination + cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False) + assert not cm.Connect() + +def test_cmc_get_constellations(): + with mock_cm_connectivity() as m: + m.get("https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded", text=res_constellations) + cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False) + assert cm.Connect() + + # List all constellations + constellations = cm.list_constellations() + assert len(constellations) == 2 + cids = [c.constellation_id for c in constellations] + assert cids == ["6774cc4e-b0b1-43a1-923f-80fb1bec094b", "233e169b-5d88-481d-bfe2-c909a2a859dd"] + ifnames = [c.ifnames() for c in constellations] + assert ifnames == [['XR HUB 2|XR-T1', 'XR HUB 2|XR-T2', 'XR HUB 2|XR-T3', 'XR HUB 2|XR-T4', 'XR LEAF 3|XR-T1'], + ['XR HUB 1|XR-T1', 'XR HUB 1|XR-T2', 'XR HUB 1|XR-T3', 'XR HUB 1|XR-T4', 'XR LEAF 1|XR-T1', 'XR LEAF 2|XR-T1']] + + # Get constellation by hub module name + m.get("https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D", text=res_constellation_by_name_hub1) + constellation = cm.get_constellation_by_hub_name("XR HUB 1") + assert constellation + assert constellation.ifnames() == ['XR HUB 1|XR-T1', 'XR HUB 1|XR-T2', 'XR HUB 1|XR-T3', 'XR HUB 1|XR-T4', 'XR LEAF 1|XR-T1', 'XR LEAF 2|XR-T1'] + assert constellation.constellation_id == 
"233e169b-5d88-481d-bfe2-c909a2a859dd" + \ No newline at end of file diff --git a/src/device/service/drivers/xr/cm/tests/test_connection.py b/src/device/service/drivers/xr/cm/tests/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..0792033a34d029628a853b9383af8c8a2c6272ad --- /dev/null +++ b/src/device/service/drivers/xr/cm/tests/test_connection.py @@ -0,0 +1,106 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import os +import json +import pytest + +from ..tf_service import TFService +from ..connection import Connection, InconsistentVlanConfiguration, ConnectionDeserializationError + +resources = os.path.join(os.path.dirname(os.path.abspath(inspect.stack()[0][1])), "resources") + +def test_connection_json(): + with open(os.path.join(resources, "connections-expanded.json"), "r", encoding="UTF-8") as f: + j = json.load(f) + connection = Connection(j[0]) + + assert connection.name == "FooBar123" + assert "name: FooBar123, id: /network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03, service-mode: portMode, end-points: [(XR LEAF 1|XR-T1, 0), (XR HUB 1|XR-T1, 0)]" == str(connection) + + config = connection.create_config() + expected_config = {'name': 'FooBar123', 'serviceMode': 'portMode', 'endpoints': [{'selector': {'ifSelectorByModuleName': {'moduleName': 'XR LEAF 1', 'moduleClientIfAid': 'XR-T1'}}}, {'selector': {'ifSelectorByModuleName': {'moduleName': 'XR HUB 1', 'moduleClientIfAid': 'XR-T1'}}}]} + assert config == expected_config + + # Remove mandatory key from leaf endpoint. 
It will not be parsed, but hub endpoint will + del j[0]["endpoints"][0]["state"]["moduleIf"]["clientIfAid"] + connection = Connection(j[0]) + assert "name: FooBar123, id: /network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03, service-mode: portMode, end-points: [(XR HUB 1|XR-T1, 0)]" == str(connection) + + # Remove Name, it is optional (although TF will always configure it) + del j[0]["state"]["name"] + connection = Connection(j[0]) + assert "name: <NO NAME>, id: /network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03, service-mode: portMode, end-points: [(XR HUB 1|XR-T1, 0)]" == str(connection) + + # Remove mandatory key, will raise an exception + del j[0]["state"] + with pytest.raises(ConnectionDeserializationError, match=r"Missing mandatory key 'state'"): + _connection = Connection(j[0]) + +def test_connection_ep_change_compute(): + with open(os.path.join(resources, "connections-expanded.json"), "r", encoding="UTF-8") as f: + j = json.load(f) + existing_connection = Connection(j[0]) + + # Changing only capacity + new_connection = Connection(from_tf_service=TFService("FooBar123", "XR LEAF 1|XR-T1", "XR HUB 1|XR-T1", 25)) + ep_deletes, ep_creates, ep_updates = new_connection.get_endpoint_updates(existing_connection) + assert not ep_deletes + assert not ep_creates + assert ep_updates == [('/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/230516d0-7e38-44b1-b174-1ba7d4454ee6', {'capacity': 25}), ('/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/1d58ba8f-4d51-4213-83e1-97a0e0bdd388', {'capacity': 25})] + + # Change one of endpoints + new_connection = Connection(from_tf_service=TFService("FooBar123", "XR LEAF 1|XR-T1", "XR HUB 1|changed here", 0)) + ep_deletes, ep_creates, ep_updates = new_connection.get_endpoint_updates(existing_connection) + assert ep_deletes == ['/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/1d58ba8f-4d51-4213-83e1-97a0e0bdd388'] + assert ep_creates == [{'selector': 
{'ifSelectorByModuleName': {'moduleClientIfAid': 'changed here', 'moduleName': 'XR HUB 1'}}}] + assert not ep_updates + + # Change one of the endpoints and capacity + new_connection = Connection(from_tf_service=TFService("FooBar123", "XR LEAF 1|XR-T1", "XR HUB 1|changed here", 125)) + ep_deletes, ep_creates, ep_updates = new_connection.get_endpoint_updates(existing_connection) + assert ep_deletes == ['/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/1d58ba8f-4d51-4213-83e1-97a0e0bdd388'] + assert ep_creates == [{'selector': {'ifSelectorByModuleName': {'moduleClientIfAid': 'changed here', 'moduleName': 'XR HUB 1'}}, "capacity": 125}] + assert ep_updates == [('/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/230516d0-7e38-44b1-b174-1ba7d4454ee6', {'capacity': 125})] + + # No change at all + new_connection = Connection(from_tf_service=TFService("FooBar123", "XR LEAF 1|XR-T1", "XR HUB 1|XR-T1", 0)) + ep_deletes, ep_creates, ep_updates = new_connection.get_endpoint_updates(existing_connection) + assert not ep_deletes + assert not ep_creates + assert not ep_updates + + # Order of endpoints does not matter + new_connection = Connection(from_tf_service=TFService("FooBar123", "XR HUB 1|XR-T1", "XR LEAF 1|XR-T1", 0)) + ep_deletes, ep_creates, ep_updates = new_connection.get_endpoint_updates(existing_connection) + assert not ep_deletes + assert not ep_creates + assert not ep_updates + +def test_connection_from_service(): + # Port mode + connection = Connection(from_tf_service=TFService("FooBar123", "XR LEAF 1|XR-T1", "XR HUB 1|XR-T1", 0)) + assert connection.create_config() == {'name': 'TF:FooBar123', 'serviceMode': 'portMode', 'endpoints': [{'selector': {'ifSelectorByModuleName': {'moduleName': 'XR LEAF 1', 'moduleClientIfAid': 'XR-T1'}}}, {'selector': {'ifSelectorByModuleName': {'moduleName': 'XR HUB 1', 'moduleClientIfAid': 'XR-T1'}}}]} + + # VTI mode + connection = Connection(from_tf_service=TFService("FooBar123", "XR LEAF 1|XR-T1.A", 
"XR HUB 1|XR-T1.100", 0)) + # In endpoint selectors VLANs are not present (CM does not know about them, encoding them to aids is purely internal to Teraflow) + # However VLAN adds outerVID and some other fields + assert connection.create_config() == {'name': 'TF:FooBar123', 'serviceMode': 'vtiP2pSymmetric', 'endpoints': [{'selector': {'ifSelectorByModuleName': {'moduleName': 'XR LEAF 1', 'moduleClientIfAid': 'XR-T1'}}}, {'selector': {'ifSelectorByModuleName': {'moduleName': 'XR HUB 1', 'moduleClientIfAid': 'XR-T1'}}}], 'outerVID': '100 ', 'mc': 'matchOuterVID'} + + # Invalid configuration, differing VLANs on different sides + with pytest.raises(InconsistentVlanConfiguration) as _e_info: + Connection(from_tf_service=TFService("FooBar123", "XR LEAF 1|XR-T1.200", "XR HUB 1|XR-T1.100", 0)) diff --git a/src/device/service/drivers/xr/cm/tests/test_constellation.py b/src/device/service/drivers/xr/cm/tests/test_constellation.py new file mode 100644 index 0000000000000000000000000000000000000000..82848b57e87884826e42234124aade4f447003fe --- /dev/null +++ b/src/device/service/drivers/xr/cm/tests/test_constellation.py @@ -0,0 +1,39 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import os +import json +import pytest +from ..constellation import Constellation, ConstellationDeserializationError + +resources = os.path.join(os.path.dirname(os.path.abspath(inspect.stack()[0][1])), "resources") + +def test_constellation_json(): + # With a name + with open(os.path.join(resources, "constellations-expanded.json"), "r", encoding="UTF-8") as f: + j = json.load(f) + + # Proper constellation with endpoints + constellation = Constellation(j[1]) + assert constellation.constellation_id == "233e169b-5d88-481d-bfe2-c909a2a859dd" + assert not constellation.is_vti_mode() + print(constellation.ifnames()) + assert ['XR HUB 1|XR-T1', 'XR HUB 1|XR-T2', 'XR HUB 1|XR-T3', 'XR HUB 1|XR-T4', 'XR LEAF 1|XR-T1', 'XR LEAF 2|XR-T1'] == constellation.ifnames() + + # Remove mandatory key, will raise an exception + del j[0]["hubModule"]["state"] + with pytest.raises(ConstellationDeserializationError, match=r"Missing mandatory key 'state'"): + _constellation = Constellation(j[0]) diff --git a/src/device/service/drivers/xr/cm/tests/test_transport_capacitity.py b/src/device/service/drivers/xr/cm/tests/test_transport_capacitity.py new file mode 100644 index 0000000000000000000000000000000000000000..cfdadae6a5e150e9890076dba0e657aea6fa3b1e --- /dev/null +++ b/src/device/service/drivers/xr/cm/tests/test_transport_capacitity.py @@ -0,0 +1,59 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import os +import json + +from ..tf_service import TFService +from ..transport_capacity import TransportCapacity + +resources = os.path.join(os.path.dirname(os.path.abspath(inspect.stack()[0][1])), "resources") + +def test_transport_capacity_json(): + # Swagger example has been manually edited to match schema, that is moduleClientIfAid --> clientIfAid in state + # Also names of leafs have been fixed to be unique + # Once CM implementation is available, actual data obtained from CM should be used as a test vector + with open(os.path.join(resources, "transport-capacities-swagger-example.json"), "r", encoding="UTF-8") as f: + j = json.load(f) + + # A pre-planned constellation without endpoints + tc = TransportCapacity(j[0]) + assert str(tc) == "name: Transport capacity service example, id: /transport-capacities/6ce3aa86-2685-44b0-9f86-49e6a6c991a8, capacity-mode: dedicatedDownlinkSymmetric, end-points: [(XR Device|XR T1, 100), (XR Device 2|XR T1, 100)]" + + config = tc.create_config() + assert config == {'config': {'name': 'Transport capacity service example'}, 'endpoints': [{'capacity': 100, 'selector': {'ifSelectorByModuleName': {'moduleName': 'XR Device', 'moduleClientIfAid': 'XR T1'}}}, {'capacity': 100, 'selector': {'ifSelectorByModuleName': {'moduleName': 'XR Device 2', 'moduleClientIfAid': 'XR T1'}}}]} + +def test_transport_capacity_comparison(): + # Same content must compare same + t1=TransportCapacity(from_tf_service=TFService("foo", "Hub|T1", "Leaf 1|T2", 25)) + t2=TransportCapacity(from_tf_service=TFService("foo", "Hub|T1", "Leaf 
1|T2", 25)) + assert t1 == t2 + + # Order of endpoints does not matter: + t2=TransportCapacity(from_tf_service=TFService("foo", "Leaf 1|T2", "Hub|T1", 25)) + assert t1 == t2 + + # Different bandwidth + t2=TransportCapacity(from_tf_service=TFService("foo", "Hub|T1", "Leaf 1|T2", 50)) + assert t1 != t2 + + # Different leaf module + t2=TransportCapacity(from_tf_service=TFService("foo", "Hub|T1", "Leaf 2|T2", 25)) + assert t1 != t2 + + # Different leaf interface + t2=TransportCapacity(from_tf_service=TFService("foo", "Hub|T1", "Leaf 1|T3", 25)) + assert t1 != t2 diff --git a/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py b/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py new file mode 100644 index 0000000000000000000000000000000000000000..5a97e6ee2ee5d2ca119f2f8c3ffb776f34d8c1bc --- /dev/null +++ b/src/device/service/drivers/xr/cm/tests/test_xr_service_set_config.py @@ -0,0 +1,109 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import os +import json +import requests_mock +import traceback + +from ..cm_connection import CmConnection +from ..tf import set_config_for_service + +access_token = r'{"access_token":"eyI3...","expires_in":3600,"refresh_expires_in":0,"refresh_token":"ey...","token_type":"Bearer","not-before-policy":0,"session_state":"f6e235c4-4ca4-4258-bede-4f2b7125adfb","scope":"profile email offline_access"}' + +resources = os.path.join(os.path.dirname(os.path.abspath(inspect.stack()[0][1])), "resources") +with open(os.path.join(resources, "constellation-by-name-hub1.json"), "r", encoding="UTF-8") as f: + res_constellation_by_name_hub1 = f.read() +with open(os.path.join(resources, "connections-expanded.json"), "r", encoding="UTF-8") as f: + j = json.load(f) + # Fake reference data to have the name this test case needs for the given teraflow UUID + # (=no need for too large set of reference material) + j[0]["state"]["name"] = "TF:12345ABCDEFGHIJKLMN" + res_connection_by_name_json = [j[0]] # Single item list + +def mock_cm(): + m = requests_mock.Mocker() + m.post('https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token', text=access_token) + m.get("https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D", text=res_constellation_by_name_hub1) + m.post("https://127.0.0.1:9999/api/v1/ncs/network-connections", text='[{"href":"/network-connections/c3b31608-0bb7-4a4f-9f9a-88b24a059432","rt":["cm.network-connection"]}]', status_code=202) + return m + +uuid = "12345ABCDEFGHIJKLMN" +config = { + "input_sip": "XR HUB 1|XR-T4;", + "output_sip": "XR LEAF 1|XR-T1", + "capacity_value": 125, + "capacity_unit": "gigabit" +} + +def _validate_result(result, expect): + if isinstance(result, Exception): + traceback.print_exception(result) + assert result is expect # Not, "is", not ==, we want type checking in this case, as also an exception can be returned (as return value) + +def 
test_xr_set_config(): + with mock_cm() as m: + cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False) + assert cm.Connect() + + constellation = cm.get_constellation_by_hub_name("XR HUB 1") + assert constellation + + result = set_config_for_service(cm, constellation, uuid, config) + _validate_result(result, True) + + called_mocks = [(r._request.method, r._request.url) for r in m._adapter.request_history] + expected_mocks = [ + ('POST', 'https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token'), # Authentication + ('GET', 'https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D'), # Hub module by name + ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D'), # Get by name, determine update or create + ('POST', 'https://127.0.0.1:9999/api/v1/ncs/network-connections') # Create + ] + assert called_mocks == expected_mocks + +def test_xr_set_config_update_case(): + with mock_cm() as m: + cm = CmConnection("127.0.0.1", 9999, "xr-user", "xr-password", tls_verify=False) + assert cm.Connect() + + constellation = cm.get_constellation_by_hub_name("XR HUB 1") + assert constellation + + # Fake existing service (--> update path is taken) + m.get("https://127.0.0.1:9999/api/v1/ncs/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D", json=res_connection_by_name_json) + # Delete endpoint that is no longer necessary + m.delete("https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/1d58ba8f-4d51-4213-83e1-97a0e0bdd388", text="", status_code = 202) + # Update changed endpoint + m.put("https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/230516d0-7e38-44b1-b174-1ba7d4454ee6", text="", status_code = 202) + # Create the newly added endpoint + 
m.post("https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints", json=[{"href":"/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoint/somethingplausible","rt":["plausible"]}], status_code=202) + # Update the connection itself + m.put("https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03", text="", status_code=202) + + result = set_config_for_service(cm, constellation, uuid, config) + _validate_result(result, True) + + called_mocks = [(r._request.method, r._request.url) for r in m._adapter.request_history] + expected_mocks = [ + ('POST', 'https://127.0.0.1:9999/realms/xr-cm/protocol/openid-connect/token'), # Authentication + ('GET', 'https://127.0.0.1:9999/api/v1/ns/xr-networks?content=expanded&content=expanded&q=%7B%22hubModule.state.module.moduleName%22%3A+%22XR+HUB+1%22%7D'), # Hub module by name + ('GET', 'https://127.0.0.1:9999/api/v1/ncs/network-connections?content=expanded&q=%7B%22state.name%22%3A+%22TF%3A12345ABCDEFGHIJKLMN%22%7D'), # Get by name, determine update or create + ('DELETE', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/1d58ba8f-4d51-4213-83e1-97a0e0bdd388'), # Delete unnecessary endpoint + ('PUT', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints/230516d0-7e38-44b1-b174-1ba7d4454ee6'), # Update changed endpoint + ('POST', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03/endpoints'), # Add new endpoint + ('PUT', 'https://127.0.0.1:9999/api/v1/ncs/network-connections/4505d5d3-b2f3-40b8-8ec2-4a5b28523c03') # Update the connection itself + ] + assert called_mocks == expected_mocks diff --git a/src/device/service/drivers/xr/cm/tf.py b/src/device/service/drivers/xr/cm/tf.py new file mode 100644 index 0000000000000000000000000000000000000000..1872bfe6c374c9e295b71c8f9673689c67202cd9 --- /dev/null +++ 
b/src/device/service/drivers/xr/cm/tf.py @@ -0,0 +1,71 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, Union +import logging +from .cm_connection import CmConnection +from .constellation import Constellation +from .tf_service import TFService +from .transport_capacity import TransportCapacity +from .connection import Connection + +LOGGER = logging.getLogger(__name__) + +def _get_value_or_default(config: Dict[str, any], key: str, default_value: any) -> any: + if key not in config: + return default_value + else: + return config[key] + +def _get_capacity(config) -> int: + if "capacity_unit" not in config or "capacity_value" not in config: + return 0 + if config["capacity_unit"] != "gigabit": + return 0 + return config["capacity_value"] + +def set_config_for_service(cm_connection: CmConnection, constellation: Constellation, uuid: str, config: Dict[str, any]) -> Union[bool, Exception]: + try: + service = TFService(uuid, config["input_sip"], config["output_sip"], _get_capacity(config)) + if constellation.is_vti_mode(): + desired_tc = TransportCapacity(from_tf_service=service) + active_tc = cm_connection.get_transport_capacity_by_name(service.name()) + if desired_tc != active_tc: + if active_tc: + 
LOGGER.info(f"set_config_for_service: Transport Capacity change for {uuid}, ({active_tc=}, {desired_tc=}), performing service impacting update") + # Remove previous connection (if any) + active_connection = cm_connection.get_connection_by_name(service.name()) + if active_connection: + cm_connection.delete_connection(active_connection.href) + # Delete old TC + cm_connection.delete_transport_capacity(active_tc.href) + if desired_tc: + href = cm_connection.create_transport_capacity(desired_tc) + if not href: + LOGGER.error(f"set_config_for_service: Failed to create Transport Capacity ({desired_tc=})") + return False + connection = Connection(from_tf_service=service) + href = cm_connection.create_or_update_connection(connection) + if href: + LOGGER.info(f"set_config_for_service: Created service {uuid} as {href} (connection={str(connection)})") + return True + else: + LOGGER.error(f"set_config_for_service: Service creation failure for {uuid} (connection={str(connection)})") + return False + # Intentionally catching all exceptions, as they are stored in a list as return values + # by the caller + # pylint: disable=broad-except + except Exception as e: + return e diff --git a/src/device/service/drivers/xr/cm/tf_service.py b/src/device/service/drivers/xr/cm/tf_service.py new file mode 100644 index 0000000000000000000000000000000000000000..7ba8d9ee44bca7820857c50ff5894099c75e8c5b --- /dev/null +++ b/src/device/service/drivers/xr/cm/tf_service.py @@ -0,0 +1,67 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring, wildcard-import, unused-wildcard-import +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import Tuple, Optional, Dict, List +from .utils import * + +@dataclass(init=False) +class TFService: + input_sip: str + output_sip: str + uuid: str + capacity: int + + def __init__(self, uuid, input_sip, output_sip, capacity): + self.uuid = uuid + self.input_sip = input_sip + self.output_sip = output_sip + # Capacity must be in multiples of 25 gigabits + if 0 == capacity: + self.capacity = 0 + else: + self.capacity = math.ceil(capacity/25) * 25 + + def __str__(self): + return f"({self.uuid}, {self.input_sip}, {self.output_sip}, {self.capacity})" + + def name(self) -> str: + return f"TF:{self.uuid}" + + def input_mod_aid_vlan(self) -> Tuple[str, str, Optional[str]]: + return ifname_to_module_aid_vlan(self.input_sip) + + def output_mod_aid_vlan(self) -> Tuple[str, str, Optional[str]]: + return ifname_to_module_aid_vlan(self.output_sip) + + # Return endpoints in a form suitable for selectors in various + # JSON constructs used by the CM API + def get_endpoint_selectors(self) -> List[Dict]: + return [make_selector(*self.input_mod_aid_vlan()), make_selector(*self.output_mod_aid_vlan())] + + # -> List[Tuple(str, str)] + def get_endpoints_mod_aid(self): + m1, a1, _ = self.input_mod_aid_vlan() + m2, a2, _ = self.output_mod_aid_vlan() + + return [(m1, a1), (m2, a2)] + + # -> List[Tuple(str, str)] + def get_endpoints_mod_aid_vlan(self): + m1, a1, v1 = self.input_mod_aid_vlan() + m2, a2, v2 = self.output_mod_aid_vlan() + + return [(m1, a1, v1), (m2, a2, v2)] diff --git 
a/src/device/service/drivers/xr/cm/transport_capacity.py b/src/device/service/drivers/xr/cm/transport_capacity.py new file mode 100644 index 0000000000000000000000000000000000000000..d28d5b13707249a60fde04ccf4a1f1d35cc45cc8 --- /dev/null +++ b/src/device/service/drivers/xr/cm/transport_capacity.py @@ -0,0 +1,109 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Optional, Dict +from dataclasses import dataclass + +from .utils import make_selector + +from .tf_service import TFService + +@dataclass +class TCEndpoint: + module: str + port: str + capacity: int + + def ifname(self) -> str: + return self.module + "|" + self.port + + def __str__(self): + return f"({self.ifname()}, {self.capacity})" + + def create_config(self) -> Dict[str, any]: + cfg = { + "capacity": self.capacity, + "selector": make_selector(self.module, self.port, None) + } + return cfg + +class TransportCapacityDeserializationError(Exception): + pass + +class TransportCapacity: + def __init__(self, from_json=None, from_tf_service: Optional[TFService] = None): + def get_endpoint_from_json(endpoint: dict[str, any]) -> Optional[TCEndpoint]: + try: + return TCEndpoint(endpoint["state"]["moduleIf"]["moduleName"], endpoint["state"]["moduleIf"]["clientIfAid"], + endpoint["state"]["capacity"]) + except KeyError: + return None + + if from_json: + try: + self.href = from_json["href"] + + state = from_json["state"] + # Name is optional + self.name = state["name"] if "name" in state else None + self.capacity_mode = state["capacityMode"] + + self.endpoints = [] + for epj in from_json["endpoints"]: + ep = get_endpoint_from_json(epj) + if ep: + self.endpoints.append(ep) + + #self.__cm_data = from_json + except KeyError as e: + raise TransportCapacityDeserializationError(f"Missing mandatory key {str(e)}") from e + elif from_tf_service: + self.href = None + self.state = "tfInternalObject" + self.name = from_tf_service.name() + self.capacity_mode = "dedicatedDownlinkSymmetric" + self.endpoints = [TCEndpoint(mod, port, from_tf_service.capacity) for mod,port in from_tf_service.get_endpoints_mod_aid()] + #self.__cm_data = None + else: + # May support other initializations in future + raise TransportCapacityDeserializationError("Initializer missing") + + # Return suitable config for CM + def create_config(self) -> Dict[str, any]: + cfg = {} + if self.name is 
not None: + cfg["config"] = {"name": self.name } + cfg["endpoints"] = [ep.create_config() for ep in self.endpoints] + return cfg + + def __str__(self): + name = self.name if self.name else "<NO NAME>" + endpoints = ", ".join((str(ep) for ep in self.endpoints)) + return f"name: {name}, id: {self.href}, capacity-mode: {self.capacity_mode}, end-points: [{endpoints}]" + + # Comparison for purpose of re-configuring + def __eq__(self, obj): + if not isinstance(obj, TransportCapacity): + return False + if self.name != obj.name: + return False + if self.capacity_mode != obj.capacity_mode: + return False + if sorted(self.endpoints, key=str) != sorted(obj.endpoints, key=str): + return False + return True + + def __ne__(self, obj): + return not self == obj diff --git a/src/device/service/drivers/xr/cm/utils.py b/src/device/service/drivers/xr/cm/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cdf9e58c348f572c1547bb392a8cddba7669d0b0 --- /dev/null +++ b/src/device/service/drivers/xr/cm/utils.py @@ -0,0 +1,71 @@ +#pylint: disable=invalid-name, missing-function-docstring, line-too-long, logging-fstring-interpolation, missing-class-docstring, missing-module-docstring +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Any, Tuple, Optional, Dict + +class InvalidIfnameError(Exception): + def __init__(self, ifname): + # Call the base class constructor with the parameters it needs + super().__init__(f"Invalid interface name {ifname}, expecting format \"MODULENAME|PORTNAME\" or \"MODULENAME|PORTNAME.VLAN\"") + +def ifname_to_module_and_aid(ifname: str) -> Tuple[str, str]: + a = ifname.split("|") + if len(a) != 2: + raise InvalidIfnameError(ifname) + return (a[0], a[1]) + +def virtual_aid_to_aid_and_vlan(ifname: str) -> Tuple[str, Optional[str]]: + a = ifname.split(".") + if len(a) == 1: + return (ifname, None) + if len(a) != 2: + raise InvalidIfnameError(ifname) + return (a[0], a[1]) + +def ifname_to_module_aid_vlan(ifname: str) -> Tuple[str, str, Optional[str]]: + module, aid = ifname_to_module_and_aid(ifname) + aid, vlan = virtual_aid_to_aid_and_vlan(aid) + return (module, aid, vlan) + +# For some reason when writing config, selector has moduleClientIfAid, when reading +# state it has clientIfAid... 
+def make_selector(mod, aid, _vlan) -> Dict[str, Any]: + selector = { + "ifSelectorByModuleName": { + "moduleName": mod, + "moduleClientIfAid": aid, + } + } + return selector + +def get_constellation_module_ifnames(module): + ifnames = [] + try: + module_state = module["state"] + module_name = module_state["module"]["moduleName"] + if "endpoints" in module_state: + for endpoint in module_state["endpoints"]: + try: + ifname = endpoint["moduleIf"]["clientIfAid"] + ifnames.append(f"{module_name}|{ifname}") + except KeyError: + pass + except KeyError: + pass + return ifnames + +def set_optional_parameter(container: Dict[str, any], key:str, value: Optional[any]): + if value is not None: + container[key] = value diff --git a/src/dlt/connector/Config.py b/src/dlt/connector/Config.py index 9953c820575d42fa88351cc8de022d880ba96e6a..bdf9f306959e86160012541e8a72cc9aabb019c0 100644 --- a/src/dlt/connector/Config.py +++ b/src/dlt/connector/Config.py @@ -11,3 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import os + +DEFAULT_DLT_GATEWAY_HOST = '127.0.0.1' +DEFAULT_DLT_GATEWAY_PORT = '50051' + +# Find IP:port of gateway container as follows: +# - first check env vars DLT_GATEWAY_HOST & DLT_GATEWAY_PORT +# - if not set, use DEFAULT_DLT_GATEWAY_HOST & DEFAULT_DLT_GATEWAY_PORT +DLT_GATEWAY_HOST = str(os.environ.get('DLT_GATEWAY_HOST', DEFAULT_DLT_GATEWAY_HOST)) +DLT_GATEWAY_PORT = int(os.environ.get('DLT_GATEWAY_PORT', DEFAULT_DLT_GATEWAY_PORT)) diff --git a/src/dlt/connector/Dockerfile b/src/dlt/connector/Dockerfile index 51e9ec506f0c8a6c35ceac68833e3ad683ef8e63..c5d600ee0d55deb5a8bd4dca2d4f12cd092ad420 100644 --- a/src/dlt/connector/Dockerfile +++ b/src/dlt/connector/Dockerfile @@ -64,6 +64,8 @@ RUN python3 -m pip install -r requirements.txt WORKDIR /var/teraflow COPY src/context/. context/ COPY src/dlt/connector/. 
dlt/connector +COPY src/interdomain/. interdomain/ +COPY src/slice/. slice/ # Start the service ENTRYPOINT ["python", "-m", "dlt.connector.service"] diff --git a/src/dlt/connector/client/DltConnectorClient.py b/src/dlt/connector/client/DltConnectorClient.py index f48562996b067ca81a99b6ceb7288029be7ba1c8..1ca511d0434dd72458982bf7c7d55d8bbd1859f1 100644 --- a/src/dlt/connector/client/DltConnectorClient.py +++ b/src/dlt/connector/client/DltConnectorClient.py @@ -15,7 +15,8 @@ import grpc, logging from common.Constants import ServiceNameEnum from common.Settings import get_service_host, get_service_port_grpc -from common.proto.context_pb2 import DeviceId, Empty, ServiceId, SliceId +from common.proto.context_pb2 import Empty, TopologyId +from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceStub from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string @@ -46,49 +47,63 @@ class DltConnectorClient: self.stub = None @RETRY_DECORATOR - def RecordAll(self, request : Empty) -> Empty: + def RecordAll(self, request : TopologyId) -> Empty: LOGGER.debug('RecordAll request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordAll(request) LOGGER.debug('RecordAll result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordAllDevices(self, request : Empty) -> Empty: + def RecordAllDevices(self, request : TopologyId) -> Empty: LOGGER.debug('RecordAllDevices request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordAllDevices(request) LOGGER.debug('RecordAllDevices result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordDevice(self, request : DeviceId) -> Empty: + def RecordDevice(self, request : DltDeviceId) -> Empty: LOGGER.debug('RecordDevice request: 
{:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordDevice(request) LOGGER.debug('RecordDevice result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordAllServices(self, request : Empty) -> Empty: + def RecordAllLinks(self, request : TopologyId) -> Empty: + LOGGER.debug('RecordAllLinks request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordAllLinks(request) + LOGGER.debug('RecordAllLinks result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RecordLink(self, request : DltLinkId) -> Empty: + LOGGER.debug('RecordLink request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordLink(request) + LOGGER.debug('RecordLink result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RecordAllServices(self, request : TopologyId) -> Empty: LOGGER.debug('RecordAllServices request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordAllServices(request) LOGGER.debug('RecordAllServices result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordService(self, request : ServiceId) -> Empty: + def RecordService(self, request : DltServiceId) -> Empty: LOGGER.debug('RecordService request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordService(request) LOGGER.debug('RecordService result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordAllSlices(self, request : Empty) -> Empty: + def RecordAllSlices(self, request : TopologyId) -> Empty: LOGGER.debug('RecordAllSlices request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordAllSlices(request) LOGGER.debug('RecordAllSlices result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def 
RecordSlice(self, request : SliceId) -> Empty: + def RecordSlice(self, request : DltSliceId) -> Empty: LOGGER.debug('RecordSlice request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordSlice(request) LOGGER.debug('RecordSlice result: {:s}'.format(grpc_message_to_json_string(response))) diff --git a/src/dlt/connector/client/DltEventsCollector.py b/src/dlt/connector/client/DltEventsCollector.py index 6fe2474cead37094c507a8a612181dc7f7243544..d022ac0f0144eecfcdb706665a8bde81fa54492f 100644 --- a/src/dlt/connector/client/DltEventsCollector.py +++ b/src/dlt/connector/client/DltEventsCollector.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import grpc, logging, queue, threading +import grpc, logging, queue, threading, time from common.proto.dlt_gateway_pb2 import DltRecordSubscription from common.tools.grpc.Tools import grpc_message_to_json_string from dlt.connector.client.DltGatewayClient import DltGatewayClient @@ -20,32 +20,36 @@ from dlt.connector.client.DltGatewayClient import DltGatewayClient LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -class DltEventsCollector: +class DltEventsCollector(threading.Thread): def __init__( self, dltgateway_client : DltGatewayClient, log_events_received : bool = False, ) -> None: - self._events_queue = queue.Queue() + super().__init__(name='DltEventsCollector', daemon=True) + self._dltgateway_client = dltgateway_client self._log_events_received = log_events_received - subscription = DltRecordSubscription() # bu default subscribe to all - self._dltgateway_stream = dltgateway_client.SubscribeToDlt(subscription) - self._dltgateway_thread = self._create_collector_thread(self._dltgateway_stream) - - def _create_collector_thread(self, stream, as_daemon : bool = False): - return threading.Thread(target=self._collect, args=(stream,), daemon=as_daemon) - - def _collect(self, events_stream) -> None: - try: - for 
event in events_stream: - if self._log_events_received: - LOGGER.info('[_collect] event: {:s}'.format(grpc_message_to_json_string(event))) - self._events_queue.put_nowait(event) - except grpc.RpcError as e: - if e.code() != grpc.StatusCode.CANCELLED: # pylint: disable=no-member - raise # pragma: no cover + self._events_queue = queue.Queue() + self._terminate = threading.Event() + self._dltgateway_stream = None - def start(self): - if self._dltgateway_thread is not None: self._dltgateway_thread.start() + def run(self) -> None: + self._terminate.clear() + while not self._terminate.is_set(): + try: + subscription = DltRecordSubscription() # bu default subscribe to all + self._dltgateway_stream = self._dltgateway_client.SubscribeToDlt(subscription) + for event in self._dltgateway_stream: + if self._log_events_received: + LOGGER.info('[_collect] event: {:s}'.format(grpc_message_to_json_string(event))) + self._events_queue.put_nowait(event) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.UNAVAILABLE: # pylint: disable=no-member + time.sleep(0.5) + continue + elif e.code() == grpc.StatusCode.CANCELLED: # pylint: disable=no-member + break + else: + raise # pragma: no cover def get_event(self, block : bool = True, timeout : float = 0.1): try: @@ -68,5 +72,5 @@ class DltEventsCollector: return sorted(events, key=lambda e: e.event.timestamp.timestamp) def stop(self): + self._terminate.set() if self._dltgateway_stream is not None: self._dltgateway_stream.cancel() - if self._dltgateway_thread is not None: self._dltgateway_thread.join() diff --git a/src/dlt/connector/client/DltGatewayClient.py b/src/dlt/connector/client/DltGatewayClient.py index f1f8dec391bb836cea33422176730d250090429d..e2f5530f9a971d0a25cac042d361c52db5c16304 100644 --- a/src/dlt/connector/client/DltGatewayClient.py +++ b/src/dlt/connector/client/DltGatewayClient.py @@ -14,14 +14,13 @@ from typing import Iterator import grpc, logging -from common.Constants import ServiceNameEnum -from 
common.Settings import get_service_host, get_service_port_grpc from common.proto.context_pb2 import Empty, TeraFlowController from common.proto.dlt_gateway_pb2 import ( DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId, DltRecordStatus, DltRecordSubscription) from common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceStub from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string +from dlt.connector.Config import DLT_GATEWAY_HOST, DLT_GATEWAY_PORT LOGGER = logging.getLogger(__name__) MAX_RETRIES = 15 @@ -30,8 +29,8 @@ RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, class DltGatewayClient: def __init__(self, host=None, port=None): - if not host: host = get_service_host(ServiceNameEnum.DLT) - if not port: port = get_service_port_grpc(ServiceNameEnum.DLT) + if not host: host = DLT_GATEWAY_HOST + if not port: port = DLT_GATEWAY_PORT self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint)) self.channel = None diff --git a/src/dlt/connector/main_test.py b/src/dlt/connector/main_test.py index 4ad90eb35444b7ba4de00159372e466e8fc68905..a877a5ce39a29dd8bf37416868d9c5a701912259 100644 --- a/src/dlt/connector/main_test.py +++ b/src/dlt/connector/main_test.py @@ -1,8 +1,11 @@ # pip install grpcio==1.47.0 grpcio-tools==1.47.0 protobuf==3.20.1 +# PYTHONPATH=./src python # PYTHONPATH=/home/cttc/teraflow/src python -m dlt.connector.main_test import logging, sys, time -from common.proto.dlt_gateway_pb2 import DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UPDATE, DLTRECORDTYPE_DEVICE, DltRecord +from common.proto.dlt_gateway_pb2 import ( + DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UNDEFINED, DLTRECORDOPERATION_UPDATE, DLTRECORDTYPE_DEVICE, + DLTRECORDTYPE_UNDEFINED, DltRecord, DltRecordId) from common.tools.object_factory.Device import json_device from common.tools.grpc.Tools import 
grpc_message_to_json_string from src.common.proto.context_pb2 import DEVICEOPERATIONALSTATUS_ENABLED, Device @@ -12,13 +15,33 @@ from .client.DltEventsCollector import DltEventsCollector logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) +DLT_GATEWAY_HOST = '127.0.0.1' +DLT_GATEWAY_PORT = 30551 #50051 + +def record_found(record : DltRecord) -> bool: + found = True + found = found and (len(record.record_id.domain_uuid.uuid) > 0) + found = found and (record.record_id.type != DLTRECORDTYPE_UNDEFINED) + found = found and (len(record.record_id.record_uuid.uuid) > 0) + #found = found and (record.operation != DLTRECORDOPERATION_UNDEFINED) + found = found and (len(record.data_json) > 0) + return found + def main(): - dltgateway_client = DltGatewayClient(host='127.0.0.1', port=50051) + dltgateway_client = DltGatewayClient(host=DLT_GATEWAY_HOST, port=DLT_GATEWAY_PORT) dltgateway_collector = DltEventsCollector(dltgateway_client, log_events_received=True) dltgateway_collector.start() time.sleep(3) + # Check record exists + dri = DltRecordId() + dri.domain_uuid.uuid = 'non-existing-domain' + dri.record_uuid.uuid = 'non-existing-record' + dri.type = DLTRECORDTYPE_DEVICE + reply = dltgateway_client.GetFromDlt(dri) + assert not record_found(reply), 'Record should not exist' + device = Device(**json_device('dev-1', 'packet-router', DEVICEOPERATIONALSTATUS_ENABLED)) r2dlt_req = DltRecord() diff --git a/src/dlt/connector/service/DltConnector.py b/src/dlt/connector/service/DltConnector.py deleted file mode 100644 index 0c42d66852e8eb895a07c761f7535a0d768a9e91..0000000000000000000000000000000000000000 --- a/src/dlt/connector/service/DltConnector.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging, threading -from common.tools.grpc.Tools import grpc_message_to_json_string -from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector -from dlt.connector.client.DltConnectorClient import DltConnectorClient - -LOGGER = logging.getLogger(__name__) - -class DltConnector: - def __init__(self) -> None: - LOGGER.debug('Creating connector...') - self._terminate = threading.Event() - self._thread = None - LOGGER.debug('Connector created') - - def start(self): - self._terminate.clear() - self._thread = threading.Thread(target=self._run_events_collector) - self._thread.start() - - def _run_events_collector(self) -> None: - dltconnector_client = DltConnectorClient() - context_client = ContextClient() - events_collector = EventsCollector(context_client) - events_collector.start() - - while not self._terminate.is_set(): - event = events_collector.get_event() - LOGGER.info('Event from Context Received: {:s}'.format(grpc_message_to_json_string(event))) - - events_collector.stop() - context_client.close() - dltconnector_client.close() - - def stop(self): - self._terminate.set() - self._thread.join() diff --git a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py index 860e46f3ab88b097f4aa8e06508b19518055e46f..6c5401cb1724f8a759001d790e835ab78ce4c6c6 100644 --- a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py +++ b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py @@ -13,9 +13,15 @@ # limitations under 
the License. import grpc, logging -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.proto.context_pb2 import DeviceId, Empty, ServiceId, SliceId +from common.proto.context_pb2 import DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId +from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceServicer +from common.proto.dlt_gateway_pb2 import DltRecord, DltRecordId, DltRecordOperationEnum, DltRecordTypeEnum +from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from dlt.connector.client.DltGatewayClient import DltGatewayClient +from .tools.Checkers import record_exists LOGGER = logging.getLogger(__name__) @@ -23,6 +29,7 @@ SERVICE_NAME = 'DltConnector' METHOD_NAMES = [ 'RecordAll', 'RecordAllDevices', 'RecordDevice', + 'RecordAllLinks', 'RecordLink', 'RecordAllServices', 'RecordService', 'RecordAllSlices', 'RecordSlice', ] @@ -34,29 +41,121 @@ class DltConnectorServiceServicerImpl(DltConnectorServiceServicer): LOGGER.debug('Servicer Created') @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordAll(self, request : Empty, context : grpc.ServicerContext) -> Empty: + def RecordAll(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordAllDevices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordDevice(self, request : DltDeviceId, context : grpc.ServicerContext) -> Empty: + context_client = ContextClient() + device = context_client.GetDevice(request.device_id) + + dltgateway_client = DltGatewayClient() + + dlt_record_id = DltRecordId() + 
dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid + dlt_record_id.type = DltRecordTypeEnum.DLTRECORDTYPE_DEVICE + dlt_record_id.record_uuid.uuid = device.device_id.device_uuid.uuid + + LOGGER.info('[RecordDevice] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id))) + dlt_record = dltgateway_client.GetFromDlt(dlt_record_id) + LOGGER.info('[RecordDevice] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + + exists = record_exists(dlt_record) + LOGGER.info('[RecordDevice] exists = {:s}'.format(str(exists))) + + dlt_record = DltRecord() + dlt_record.record_id.CopyFrom(dlt_record_id) + dlt_record.operation = \ + DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \ + if exists else \ + DltRecordOperationEnum.DLTRECORDOPERATION_ADD + + dlt_record.data_json = grpc_message_to_json_string(device) + LOGGER.info('[RecordDevice] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + dlt_record_status = dltgateway_client.RecordToDlt(dlt_record) + LOGGER.info('[RecordDevice] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status))) return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordAllDevices(self, request : Empty, context : grpc.ServicerContext) -> Empty: + def RecordAllLinks(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: + def RecordLink(self, request : DltLinkId, context : grpc.ServicerContext) -> Empty: + context_client = ContextClient() + link = context_client.GetLink(request.link_id) + + dltgateway_client = DltGatewayClient() + + dlt_record_id = DltRecordId() + dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid + dlt_record_id.type = DltRecordTypeEnum.DLTRECORDTYPE_LINK + dlt_record_id.record_uuid.uuid = link.link_id.link_uuid.uuid + + 
LOGGER.info('[RecordLink] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id))) + dlt_record = dltgateway_client.GetFromDlt(dlt_record_id) + LOGGER.info('[RecordLink] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + + exists = record_exists(dlt_record) + LOGGER.info('[RecordLink] exists = {:s}'.format(str(exists))) + + dlt_record = DltRecord() + dlt_record.record_id.CopyFrom(dlt_record_id) + dlt_record.operation = \ + DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \ + if exists else \ + DltRecordOperationEnum.DLTRECORDOPERATION_ADD + + dlt_record.data_json = grpc_message_to_json_string(link) + LOGGER.info('[RecordLink] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + dlt_record_status = dltgateway_client.RecordToDlt(dlt_record) + LOGGER.info('[RecordLink] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status))) return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordAllServices(self, request : Empty, context : grpc.ServicerContext) -> Empty: + def RecordAllServices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: + def RecordService(self, request : DltServiceId, context : grpc.ServicerContext) -> Empty: return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordAllSlices(self, request : Empty, context : grpc.ServicerContext) -> Empty: + def RecordAllSlices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: + def RecordSlice(self, request : DltSliceId, context : grpc.ServicerContext) -> Empty: + context_client = ContextClient() + slice_ = context_client.GetSlice(request.slice_id) + + 
dltgateway_client = DltGatewayClient() + + dlt_record_id = DltRecordId() + dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid + dlt_record_id.type = DltRecordTypeEnum.DLTRECORDTYPE_SLICE + dlt_record_id.record_uuid.uuid = slice_.slice_id.slice_uuid.uuid + + LOGGER.info('[RecordSlice] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id))) + dlt_record = dltgateway_client.GetFromDlt(dlt_record_id) + LOGGER.info('[RecordSlice] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + + exists = record_exists(dlt_record) + LOGGER.info('[RecordSlice] exists = {:s}'.format(str(exists))) + + dlt_record = DltRecord() + dlt_record.record_id.CopyFrom(dlt_record_id) + dlt_record.operation = \ + DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \ + if exists else \ + DltRecordOperationEnum.DLTRECORDOPERATION_ADD + + dlt_record.data_json = grpc_message_to_json_string(slice_) + LOGGER.info('[RecordSlice] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + dlt_record_status = dltgateway_client.RecordToDlt(dlt_record) + LOGGER.info('[RecordSlice] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status))) return Empty() diff --git a/src/dlt/connector/service/__main__.py b/src/dlt/connector/service/__main__.py index 435a93f61bf934a17d9c044756648176e9cb2d2d..76e7bc6f1bb1b50e736327d8f08c0880e45c6835 100644 --- a/src/dlt/connector/service/__main__.py +++ b/src/dlt/connector/service/__main__.py @@ -18,6 +18,7 @@ from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, wait_for_environment_variables) +from .event_dispatcher.DltEventDispatcher import DltEventDispatcher from .DltConnectorService import DltConnectorService terminate = threading.Event() @@ -31,7 +32,7 @@ def main(): global LOGGER # pylint: disable=global-statement log_level = 
get_log_level() - logging.basicConfig(level=log_level) + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") LOGGER = logging.getLogger(__name__) wait_for_environment_variables([ @@ -48,6 +49,10 @@ def main(): metrics_port = get_metrics_port() start_http_server(metrics_port) + # Starting DLT event dispatcher + event_dispatcher = DltEventDispatcher() + event_dispatcher.start() + # Starting DLT connector service grpc_service = DltConnectorService() grpc_service.start() @@ -57,6 +62,8 @@ def main(): LOGGER.info('Terminating...') grpc_service.stop() + event_dispatcher.stop() + event_dispatcher.join() LOGGER.info('Bye') return 0 diff --git a/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py b/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..8973ae621c1291f8ed6e2673f0c64b59712143ee --- /dev/null +++ b/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py @@ -0,0 +1,209 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import grpc, json, logging, threading +from typing import Any, Dict, Set +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.proto.context_pb2 import ContextId, Device, EventTypeEnum, Link, Slice, TopologyId +from common.proto.dlt_connector_pb2 import DltSliceId +from common.proto.dlt_gateway_pb2 import DltRecordEvent, DltRecordOperationEnum, DltRecordTypeEnum +from common.tools.context_queries.Context import create_context +from common.tools.context_queries.Device import add_device_to_topology +from common.tools.context_queries.Link import add_link_to_topology +from common.tools.context_queries.Topology import create_topology +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from dlt.connector.client.DltConnectorClient import DltConnectorClient +from dlt.connector.client.DltEventsCollector import DltEventsCollector +from dlt.connector.client.DltGatewayClient import DltGatewayClient +from interdomain.client.InterdomainClient import InterdomainClient + +LOGGER = logging.getLogger(__name__) + +GET_EVENT_TIMEOUT = 0.5 + +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) + +class Clients: + def __init__(self) -> None: + self.context_client = ContextClient() + self.dlt_connector_client = DltConnectorClient() + self.dlt_gateway_client = DltGatewayClient() + self.interdomain_client = InterdomainClient() + + def close(self) -> None: + self.interdomain_client.close() + self.dlt_gateway_client.close() + self.dlt_connector_client.close() + self.context_client.close() + +class DltEventDispatcher(threading.Thread): + def __init__(self) -> None: + LOGGER.debug('Creating connector...') + super().__init__(name='DltEventDispatcher', daemon=True) + self._terminate = threading.Event() + 
LOGGER.debug('Connector created') + + def start(self) -> None: + self._terminate.clear() + return super().start() + + def stop(self): + self._terminate.set() + + def run(self) -> None: + clients = Clients() + create_context(clients.context_client, DEFAULT_CONTEXT_UUID) + create_topology(clients.context_client, DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) + create_topology(clients.context_client, DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID) + + dlt_events_collector = DltEventsCollector(clients.dlt_gateway_client, log_events_received=True) + dlt_events_collector.start() + + while not self._terminate.is_set(): + event = dlt_events_collector.get_event(block=True, timeout=GET_EVENT_TIMEOUT) + if event is None: continue + + existing_topology_ids = clients.context_client.ListTopologyIds(ADMIN_CONTEXT_ID) + local_domain_uuids = { + topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids + } + local_domain_uuids.discard(DEFAULT_TOPOLOGY_UUID) + local_domain_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID) + + self.dispatch_event(clients, local_domain_uuids, event) + + dlt_events_collector.stop() + clients.close() + + def dispatch_event(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None: + record_type : DltRecordTypeEnum = event.record_id.type # {UNDEFINED/CONTEXT/TOPOLOGY/DEVICE/LINK/SERVICE/SLICE} + if record_type == DltRecordTypeEnum.DLTRECORDTYPE_DEVICE: + self._dispatch_device(clients, local_domain_uuids, event) + elif record_type == DltRecordTypeEnum.DLTRECORDTYPE_LINK: + self._dispatch_link(clients, local_domain_uuids, event) + elif record_type == DltRecordTypeEnum.DLTRECORDTYPE_SLICE: + self._dispatch_slice(clients, local_domain_uuids, event) + else: + raise NotImplementedError('EventType: {:s}'.format(grpc_message_to_json_string(event))) + + def _dispatch_device(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None: + domain_uuid : str = event.record_id.domain_uuid.uuid + 
+ if domain_uuid in local_domain_uuids: + MSG = '[_dispatch_device] Ignoring DLT event received (local): {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(event))) + return + + MSG = '[_dispatch_device] DLT event received (remote): {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(event))) + + event_type : EventTypeEnum = event.event.event_type # {UNDEFINED/CREATE/UPDATE/REMOVE} + if event_type in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}: + LOGGER.info('[_dispatch_device] event.record_id={:s}'.format(grpc_message_to_json_string(event.record_id))) + record = clients.dlt_gateway_client.GetFromDlt(event.record_id) + LOGGER.info('[_dispatch_device] record={:s}'.format(grpc_message_to_json_string(record))) + + create_context(clients.context_client, domain_uuid) + create_topology(clients.context_client, domain_uuid, DEFAULT_TOPOLOGY_UUID) + device = Device(**json.loads(record.data_json)) + clients.context_client.SetDevice(device) + device_uuid = device.device_id.device_uuid.uuid # pylint: disable=no-member + add_device_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID, device_uuid) + domain_context_id = ContextId(**json_context_id(domain_uuid)) + add_device_to_topology(clients.context_client, domain_context_id, DEFAULT_TOPOLOGY_UUID, device_uuid) + elif event_type in {EventTypeEnum.EVENTTYPE_DELETE}: + raise NotImplementedError('Delete Device') + + def _dispatch_link(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None: + domain_uuid : str = event.record_id.domain_uuid.uuid + + if domain_uuid in local_domain_uuids: + MSG = '[_dispatch_link] Ignoring DLT event received (local): {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(event))) + return + + MSG = '[_dispatch_link] DLT event received (remote): {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(event))) + + event_type : EventTypeEnum = event.event.event_type # {UNDEFINED/CREATE/UPDATE/REMOVE} + 
if event_type in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}: + LOGGER.info('[_dispatch_link] event.record_id={:s}'.format(grpc_message_to_json_string(event.record_id))) + record = clients.dlt_gateway_client.GetFromDlt(event.record_id) + LOGGER.info('[_dispatch_link] record={:s}'.format(grpc_message_to_json_string(record))) + + link = Link(**json.loads(record.data_json)) + clients.context_client.SetLink(link) + link_uuid = link.link_id.link_uuid.uuid # pylint: disable=no-member + add_link_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID, link_uuid) + elif event_type in {EventTypeEnum.EVENTTYPE_DELETE}: + raise NotImplementedError('Delete Link') + + def _dispatch_slice(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None: + event_type : EventTypeEnum = event.event.event_type # {UNDEFINED/CREATE/UPDATE/REMOVE} + domain_uuid : str = event.record_id.domain_uuid.uuid + + LOGGER.info('[_dispatch_slice] event.record_id={:s}'.format(grpc_message_to_json_string(event.record_id))) + record = clients.dlt_gateway_client.GetFromDlt(event.record_id) + LOGGER.info('[_dispatch_slice] record={:s}'.format(grpc_message_to_json_string(record))) + + slice_ = Slice(**json.loads(record.data_json)) + + context_uuid = slice_.slice_id.context_id.context_uuid.uuid + owner_uuid = slice_.slice_owner.owner_uuid.uuid + create_context(clients.context_client, context_uuid) + create_topology(clients.context_client, context_uuid, DEFAULT_TOPOLOGY_UUID) + + if domain_uuid in local_domain_uuids: + # it is for "me" + if event_type in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}: + try: + db_slice = clients.context_client.GetSlice(slice_.slice_id) + # exists + db_json_slice = grpc_message_to_json_string(db_slice) + except grpc.RpcError: + # not exists + db_json_slice = None + + _json_slice = grpc_message_to_json_string(slice_) + if db_json_slice != _json_slice: + # not exists or is different... 
+ slice_id = clients.interdomain_client.RequestSlice(slice_) + topology_id = TopologyId(**json_topology_id(domain_uuid)) + dlt_slice_id = DltSliceId() + dlt_slice_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member + dlt_slice_id.slice_id.CopyFrom(slice_id) # pylint: disable=no-member + clients.dlt_connector_client.RecordSlice(dlt_slice_id) + + elif event_type in {EventTypeEnum.EVENTTYPE_DELETE}: + raise NotImplementedError('Delete Slice') + elif owner_uuid in local_domain_uuids: + # it is owned by me + # just update it locally + LOGGER.info('[_dispatch_slice] updating locally') + + local_slice = Slice() + local_slice.CopyFrom(slice_) + + # pylint: disable=no-member + del local_slice.slice_service_ids[:] # they are from remote domains so will not be present locally + del local_slice.slice_subslice_ids[:] # they are from remote domains so will not be present locally + + clients.context_client.SetSlice(local_slice) + else: + MSG = '[_dispatch_slice] Ignoring DLT event received (remote): {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(event))) + diff --git a/src/dlt/connector/service/event_dispatcher/__init__.py b/src/dlt/connector/service/event_dispatcher/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a --- /dev/null +++ b/src/dlt/connector/service/event_dispatcher/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/dlt/connector/service/tools/Checkers.py b/src/dlt/connector/service/tools/Checkers.py new file mode 100644 index 0000000000000000000000000000000000000000..e25d8d5a5068ee927088697ad3453fba99a1f316 --- /dev/null +++ b/src/dlt/connector/service/tools/Checkers.py @@ -0,0 +1,24 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from common.proto.dlt_gateway_pb2 import DLTRECORDOPERATION_UNDEFINED, DLTRECORDTYPE_UNDEFINED, DltRecord + +def record_exists(record : DltRecord) -> bool: + exists = True + exists = exists and (len(record.record_id.domain_uuid.uuid) > 0) + exists = exists and (record.record_id.type != DLTRECORDTYPE_UNDEFINED) + exists = exists and (len(record.record_id.record_uuid.uuid) > 0) + #exists = exists and (record.operation != DLTRECORDOPERATION_UNDEFINED) + exists = exists and (len(record.data_json) > 0) + return exists diff --git a/src/dlt/connector/service/tools/__init__.py b/src/dlt/connector/service/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a --- /dev/null +++ b/src/dlt/connector/service/tools/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/interdomain/Dockerfile b/src/interdomain/Dockerfile index 388fcb76d08b49fdbc20baa3fb0d1ae957fdd46f..ee1071896d0ab0838a2126a2abb9a77278461573 100644 --- a/src/interdomain/Dockerfile +++ b/src/interdomain/Dockerfile @@ -63,10 +63,12 @@ RUN python3 -m pip install -r requirements.txt # Add component files into working directory WORKDIR /var/teraflow COPY src/context/. context/ -COPY src/device/. device/ +#COPY src/device/. device/ +COPY src/dlt/. dlt/ COPY src/interdomain/. interdomain/ -COPY src/monitoring/. 
monitoring/ -COPY src/service/. service/ +#COPY src/monitoring/. monitoring/ +COPY src/pathcomp/. pathcomp/ +#COPY src/service/. service/ COPY src/slice/. slice/ # Start the service diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py index 01ba90ef5a6cb098e6d419fa0d6abb450893f8c6..a178095aeee81c3e6407cf1c6706b047fd1c65fc 100644 --- a/src/interdomain/service/InterdomainServiceServicerImpl.py +++ b/src/interdomain/service/InterdomainServiceServicerImpl.py @@ -12,15 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. -import grpc, logging -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.proto.context_pb2 import ( - AuthenticationResult, Slice, SliceId, SliceStatus, SliceStatusEnum, TeraFlowController) +import grpc, logging, uuid +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatusEnum, TeraFlowController, TopologyId from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer -#from common.tools.grpc.Tools import grpc_message_to_json_string +from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.tools.context_queries.Context import create_context +from common.tools.context_queries.InterDomain import ( + compute_interdomain_path, compute_traversed_domains, get_local_device_uuids, is_inter_domain, is_multi_domain) +from common.tools.context_queries.Topology import create_topology +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient -from interdomain.service.RemoteDomainClients import RemoteDomainClients +from dlt.connector.client.DltConnectorClient import DltConnectorClient 
+from interdomain.service.topology_abstractor.DltRecordSender import DltRecordSender +from pathcomp.frontend.client.PathCompClient import PathCompClient from slice.client.SliceClient import SliceClient +from .RemoteDomainClients import RemoteDomainClients +from .Tools import compose_slice, compute_slice_owner, map_abstract_endpoints_to_real LOGGER = logging.getLogger(__name__) @@ -37,89 +46,92 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def RequestSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: context_client = ContextClient() + pathcomp_client = PathCompClient() slice_client = SliceClient() - - domains_to_endpoints = {} - local_domain_uuid = None - for slice_endpoint_id in request.slice_endpoint_ids: - device_uuid = slice_endpoint_id.device_id.device_uuid.uuid - domain_uuid = device_uuid.split('@')[1] - endpoints = domains_to_endpoints.setdefault(domain_uuid, []) - endpoints.append(slice_endpoint_id) - if local_domain_uuid is None: local_domain_uuid = domain_uuid + dlt_connector_client = DltConnectorClient() + + local_device_uuids = get_local_device_uuids(context_client) + slice_owner_uuid = request.slice_owner.owner_uuid.uuid + not_inter_domain = not is_inter_domain(context_client, request.slice_endpoint_ids) + no_slice_owner = len(slice_owner_uuid) == 0 + is_local_slice_owner = slice_owner_uuid in local_device_uuids + if not_inter_domain and (no_slice_owner or is_local_slice_owner): + str_slice = grpc_message_to_json_string(request) + raise Exception('InterDomain can only handle inter-domain slice requests: {:s}'.format(str_slice)) + + interdomain_path = compute_interdomain_path(pathcomp_client, request) + str_interdomain_path = [ + [device_uuid, [ + (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) + for endpoint_id in endpoint_ids + ]] + for device_uuid, endpoint_ids in interdomain_path + ] + 
LOGGER.info('interdomain_path={:s}'.format(str(str_interdomain_path))) + + traversed_domains = compute_traversed_domains(context_client, interdomain_path) + str_traversed_domains = [ + (domain_uuid, is_local_domain, [ + (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) + for endpoint_id in endpoint_ids + ]) + for domain_uuid,is_local_domain,endpoint_ids in traversed_domains + ] + LOGGER.info('traversed_domains={:s}'.format(str(str_traversed_domains))) + + slice_owner_uuid = compute_slice_owner(context_client, traversed_domains) + LOGGER.info('slice_owner_uuid={:s}'.format(str(slice_owner_uuid))) + if slice_owner_uuid is None: + raise Exception('Unable to identify slice owner') reply = Slice() reply.CopyFrom(request) - # decompose remote slices - for domain_uuid, slice_endpoint_ids in domains_to_endpoints.items(): - if domain_uuid == local_domain_uuid: continue - - remote_slice_request = Slice() - remote_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid - remote_slice_request.slice_id.slice_uuid.uuid = \ - request.slice_id.slice_uuid.uuid + ':subslice@' + local_domain_uuid - remote_slice_request.slice_status.slice_status = request.slice_status.slice_status - for endpoint_id in slice_endpoint_ids: - slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.device_id.device_uuid.uuid = endpoint_id.device_id.device_uuid.uuid - slice_endpoint_id.endpoint_uuid.uuid = endpoint_id.endpoint_uuid.uuid - - # add endpoint connecting to remote domain - if domain_uuid == 'D1': - slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1' - slice_endpoint_id.endpoint_uuid.uuid = '2/1' - elif domain_uuid == 'D2': - slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2' - slice_endpoint_id.endpoint_uuid.uuid = '2/1' - - interdomain_client = 
self.remote_domain_clients.get_peer('remote-teraflow') - remote_slice_reply = interdomain_client.LookUpSlice(remote_slice_request) - if remote_slice_reply == remote_slice_request.slice_id: # pylint: disable=no-member - # successful case - remote_slice = interdomain_client.OrderSliceFromCatalog(remote_slice_request) - if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE: - raise Exception('Remote Slice creation failed. Wrong Slice status returned') + dlt_record_sender = DltRecordSender(context_client, dlt_connector_client) + + for domain_uuid, is_local_domain, endpoint_ids in traversed_domains: + if is_local_domain: + slice_uuid = str(uuid.uuid4()) + LOGGER.info('[loop] [local] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format( + str(domain_uuid), str(is_local_domain), str(slice_uuid))) + + # local slices always in DEFAULT_CONTEXT_UUID + #context_uuid = request.slice_id.context_id.context_uuid.uuid + context_uuid = DEFAULT_CONTEXT_UUID + endpoint_ids = map_abstract_endpoints_to_real(context_client, domain_uuid, endpoint_ids) + sub_slice = compose_slice( + context_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints, + config_rules=request.slice_config.config_rules) + LOGGER.info('[loop] [local] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice))) + sub_slice_id = slice_client.CreateSlice(sub_slice) else: - # not in catalog - remote_slice = interdomain_client.CreateSliceAndAddToCatalog(remote_slice_request) - if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE: - raise Exception('Remote Slice creation failed. 
Wrong Slice status returned') - - #context_client.SetSlice(remote_slice) - #subslice_id = reply.slice_subslice_ids.add() - #subslice_id.CopyFrom(remote_slice.slice_id) - - local_slice_request = Slice() - local_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid - local_slice_request.slice_id.slice_uuid.uuid = request.slice_id.slice_uuid.uuid + ':subslice' - local_slice_request.slice_status.slice_status = request.slice_status.slice_status - for endpoint_id in domains_to_endpoints[local_domain_uuid]: - slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.CopyFrom(endpoint_id) - - # add endpoint connecting to remote domain - if local_domain_uuid == 'D1': - slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1' - slice_endpoint_id.endpoint_uuid.uuid = '2/1' - elif local_domain_uuid == 'D2': - slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2' - slice_endpoint_id.endpoint_uuid.uuid = '2/1' - - local_slice_reply = slice_client.CreateSlice(local_slice_request) - if local_slice_reply != local_slice_request.slice_id: # pylint: disable=no-member - raise Exception('Local Slice creation failed. 
Wrong Slice Id was returned') - - subslice_id = reply.slice_subslice_ids.add() - subslice_id.context_id.context_uuid.uuid = local_slice_request.slice_id.context_id.context_uuid.uuid - subslice_id.slice_uuid.uuid = local_slice_request.slice_id.slice_uuid.uuid - - context_client.SetSlice(reply) - return reply.slice_id + slice_uuid = request.slice_id.slice_uuid.uuid + LOGGER.info('[loop] [remote] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format( + str(domain_uuid), str(is_local_domain), str(slice_uuid))) + + # create context/topology for the remote domains where we are creating slices + create_context(context_client, domain_uuid) + create_topology(context_client, domain_uuid, DEFAULT_TOPOLOGY_UUID) + sub_slice = compose_slice( + domain_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints, + config_rules=request.slice_config.config_rules, owner_uuid=slice_owner_uuid) + LOGGER.info('[loop] [remote] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice))) + sub_slice_id = context_client.SetSlice(sub_slice) + topology_id = TopologyId(**json_topology_id(domain_uuid)) + dlt_record_sender.add_slice(topology_id, sub_slice) + + LOGGER.info('[loop] adding sub-slice') + reply.slice_subslice_ids.add().CopyFrom(sub_slice_id) # pylint: disable=no-member + + LOGGER.info('Recording Remote Slice requests to DLT') + dlt_record_sender.commit() + + LOGGER.info('Activating interdomain slice') + reply.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member + + LOGGER.info('Updating interdomain slice') + slice_id = context_client.SetSlice(reply) + return slice_id @safe_and_metered_rpc_method(METRICS, LOGGER) def Authenticate(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult: diff --git a/src/interdomain/service/Tools.py b/src/interdomain/service/Tools.py new file mode 100644 index 0000000000000000000000000000000000000000..fb6371603ea90437437541bb995a59813764d9ef --- /dev/null 
+++ b/src/interdomain/service/Tools.py @@ -0,0 +1,131 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import List, Optional, Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.proto.context_pb2 import ( + ConfigRule, Constraint, ContextId, Device, Empty, EndPointId, Slice, SliceStatusEnum) +from common.tools.context_queries.CheckType import device_type_is_network, endpoint_type_is_border +from common.tools.context_queries.InterDomain import get_local_device_uuids +from common.tools.grpc.ConfigRules import copy_config_rules +from common.tools.grpc.Constraints import copy_constraints +from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient + +LOGGER = logging.getLogger(__name__) + +def compute_slice_owner( + context_client : ContextClient, traversed_domains : List[Tuple[str, Device, bool, List[EndPointId]]] +) -> Optional[str]: + traversed_domain_uuids = {traversed_domain[0] for traversed_domain in traversed_domains} + + existing_topology_ids = context_client.ListTopologyIds(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))) + existing_topology_uuids = { + topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids + } + 
existing_topology_uuids.discard(DEFAULT_TOPOLOGY_UUID) + existing_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID) + + candidate_owner_uuids = traversed_domain_uuids.intersection(existing_topology_uuids) + if len(candidate_owner_uuids) != 1: + data = { + 'traversed_domain_uuids' : [td_uuid for td_uuid in traversed_domain_uuids ], + 'existing_topology_uuids': [et_uuid for et_uuid in existing_topology_uuids], + 'candidate_owner_uuids' : [co_uuid for co_uuid in candidate_owner_uuids ], + } + LOGGER.warning('Unable to identify slice owner: {:s}'.format(json.dumps(data))) + return None + + return candidate_owner_uuids.pop() + +def compose_slice( + context_uuid : str, slice_uuid : str, endpoint_ids : List[EndPointId], constraints : List[Constraint] = [], + config_rules : List[ConfigRule] = [], owner_uuid : Optional[str] = None +) -> Slice: + slice_ = Slice() + slice_.slice_id.context_id.context_uuid.uuid = context_uuid # pylint: disable=no-member + slice_.slice_id.slice_uuid.uuid = slice_uuid # pylint: disable=no-member + slice_.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED # pylint: disable=no-member + + if owner_uuid is not None: + slice_.slice_owner.owner_uuid.uuid = owner_uuid # pylint: disable=no-member + + if len(endpoint_ids) >= 2: + slice_.slice_endpoint_ids.add().CopyFrom(endpoint_ids[0]) # pylint: disable=no-member + slice_.slice_endpoint_ids.add().CopyFrom(endpoint_ids[-1]) # pylint: disable=no-member + + if len(constraints) > 0: + copy_constraints(constraints, slice_.slice_constraints) # pylint: disable=no-member + + if len(config_rules) > 0: + copy_config_rules(config_rules, slice_.slice_config.config_rules) # pylint: disable=no-member + + return slice_ + +def map_abstract_endpoints_to_real( + context_client : ContextClient, local_domain_uuid : str, abstract_endpoint_ids : List[EndPointId] +) -> List[EndPointId]: + + local_device_uuids = get_local_device_uuids(context_client) + all_devices = context_client.ListDevices(Empty()) + + 
map_endpoints_to_devices = dict() + for device in all_devices.devices: + LOGGER.info('[map_abstract_endpoints_to_real] Checking device {:s}'.format( + grpc_message_to_json_string(device))) + + if device_type_is_network(device.device_type): + LOGGER.info('[map_abstract_endpoints_to_real] Ignoring network device') + continue + device_uuid = device.device_id.device_uuid.uuid + if device_uuid not in local_device_uuids: + LOGGER.info('[map_abstract_endpoints_to_real] Ignoring non-local device') + continue + + for endpoint in device.device_endpoints: + LOGGER.info('[map_abstract_endpoints_to_real] Checking endpoint {:s}'.format( + grpc_message_to_json_string(endpoint))) + endpoint_id = endpoint.endpoint_id + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + map_endpoints_to_devices[(device_uuid, endpoint_uuid)] = endpoint_id + if endpoint_type_is_border(endpoint.endpoint_type): + map_endpoints_to_devices[(local_domain_uuid, endpoint_uuid)] = endpoint_id + + LOGGER.info('[map_abstract_endpoints_to_real] map_endpoints_to_devices={:s}'.format( + str({ + endpoint_tuple:grpc_message_to_json(endpoint_id) + for endpoint_tuple,endpoint_id in map_endpoints_to_devices.items() + }))) + + # map abstract device/endpoints to real device/endpoints + real_endpoint_ids = [] + for endpoint_id in abstract_endpoint_ids: + LOGGER.info('[map_abstract_endpoints_to_real] Mapping endpoint_id {:s} ...'.format( + grpc_message_to_json_string(endpoint_id))) + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + _endpoint_id = map_endpoints_to_devices.get((device_uuid, endpoint_uuid)) + if _endpoint_id is None: + LOGGER.warning('map_endpoints_to_devices={:s}'.format(str(map_endpoints_to_devices))) + MSG = 'Unable to map abstract EndPoint({:s}) to real one.' + raise Exception(MSG.format(grpc_message_to_json_string(endpoint_id))) + + LOGGER.info('[map_abstract_endpoints_to_real] ... 
to endpoint_id {:s}'.format( + grpc_message_to_json_string(_endpoint_id))) + real_endpoint_ids.append(_endpoint_id) + + return real_endpoint_ids diff --git a/src/interdomain/service/__main__.py b/src/interdomain/service/__main__.py index c0a078f4ded85ab957011d21d56c97c8d303dc2a..bcbda8dfda05ec7b245b5939d8a3afc4b979562f 100644 --- a/src/interdomain/service/__main__.py +++ b/src/interdomain/service/__main__.py @@ -17,7 +17,8 @@ from prometheus_client import start_http_server from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, - get_service_port_grpc, wait_for_environment_variables) + wait_for_environment_variables) +from .topology_abstractor.TopologyAbstractor import TopologyAbstractor from .InterdomainService import InterdomainService from .RemoteDomainClients import RemoteDomainClients @@ -32,14 +33,18 @@ def main(): global LOGGER # pylint: disable=global-statement log_level = get_log_level() - logging.basicConfig(level=log_level) + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") LOGGER = logging.getLogger(__name__) wait_for_environment_variables([ - get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), - get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), - get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_HOST ), - get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + 
get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), ]) signal.signal(signal.SIGINT, signal_handler) @@ -58,14 +63,19 @@ def main(): grpc_service = InterdomainService(remote_domain_clients) grpc_service.start() + # Subscribe to Context Events + topology_abstractor = TopologyAbstractor() + topology_abstractor.start() + # TODO: improve with configuration the definition of the remote peers - interdomain_service_port_grpc = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN) - remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', interdomain_service_port_grpc) + #interdomain_service_port_grpc = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN) + #remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', interdomain_service_port_grpc) # Wait for Ctrl+C or termination signal while not terminate.wait(timeout=0.1): pass LOGGER.info('Terminating...') + topology_abstractor.stop() grpc_service.stop() LOGGER.info('Bye') diff --git a/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py new file mode 100644 index 0000000000000000000000000000000000000000..01ba90ef5a6cb098e6d419fa0d6abb450893f8c6 --- /dev/null +++ b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py @@ -0,0 +1,153 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging +from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.proto.context_pb2 import ( + AuthenticationResult, Slice, SliceId, SliceStatus, SliceStatusEnum, TeraFlowController) +from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer +#from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from interdomain.service.RemoteDomainClients import RemoteDomainClients +from slice.client.SliceClient import SliceClient + +LOGGER = logging.getLogger(__name__) + +SERVICE_NAME = 'Interdomain' +METHOD_NAMES = ['RequestSlice', 'Authenticate', 'LookUpSlice', 'OrderSliceFromCatalog', 'CreateSliceAndAddToCatalog'] +METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) + +class InterdomainServiceServicerImpl(InterdomainServiceServicer): + def __init__(self, remote_domain_clients : RemoteDomainClients): + LOGGER.debug('Creating Servicer...') + self.remote_domain_clients = remote_domain_clients + LOGGER.debug('Servicer Created') + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RequestSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: + context_client = ContextClient() + slice_client = SliceClient() + + domains_to_endpoints = {} + local_domain_uuid = None + for slice_endpoint_id in request.slice_endpoint_ids: + device_uuid = slice_endpoint_id.device_id.device_uuid.uuid + domain_uuid = device_uuid.split('@')[1] + endpoints = domains_to_endpoints.setdefault(domain_uuid, []) + endpoints.append(slice_endpoint_id) + if local_domain_uuid is None: local_domain_uuid = domain_uuid + + reply = Slice() + reply.CopyFrom(request) + + # decompose remote slices + for domain_uuid, slice_endpoint_ids in domains_to_endpoints.items(): + if domain_uuid == local_domain_uuid: continue + + remote_slice_request = Slice() + 
remote_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid + remote_slice_request.slice_id.slice_uuid.uuid = \ + request.slice_id.slice_uuid.uuid + ':subslice@' + local_domain_uuid + remote_slice_request.slice_status.slice_status = request.slice_status.slice_status + for endpoint_id in slice_endpoint_ids: + slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.device_id.device_uuid.uuid = endpoint_id.device_id.device_uuid.uuid + slice_endpoint_id.endpoint_uuid.uuid = endpoint_id.endpoint_uuid.uuid + + # add endpoint connecting to remote domain + if domain_uuid == 'D1': + slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1' + slice_endpoint_id.endpoint_uuid.uuid = '2/1' + elif domain_uuid == 'D2': + slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2' + slice_endpoint_id.endpoint_uuid.uuid = '2/1' + + interdomain_client = self.remote_domain_clients.get_peer('remote-teraflow') + remote_slice_reply = interdomain_client.LookUpSlice(remote_slice_request) + if remote_slice_reply == remote_slice_request.slice_id: # pylint: disable=no-member + # successful case + remote_slice = interdomain_client.OrderSliceFromCatalog(remote_slice_request) + if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE: + raise Exception('Remote Slice creation failed. Wrong Slice status returned') + else: + # not in catalog + remote_slice = interdomain_client.CreateSliceAndAddToCatalog(remote_slice_request) + if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE: + raise Exception('Remote Slice creation failed. 
Wrong Slice status returned') + + #context_client.SetSlice(remote_slice) + #subslice_id = reply.slice_subslice_ids.add() + #subslice_id.CopyFrom(remote_slice.slice_id) + + local_slice_request = Slice() + local_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid + local_slice_request.slice_id.slice_uuid.uuid = request.slice_id.slice_uuid.uuid + ':subslice' + local_slice_request.slice_status.slice_status = request.slice_status.slice_status + for endpoint_id in domains_to_endpoints[local_domain_uuid]: + slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.CopyFrom(endpoint_id) + + # add endpoint connecting to remote domain + if local_domain_uuid == 'D1': + slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1' + slice_endpoint_id.endpoint_uuid.uuid = '2/1' + elif local_domain_uuid == 'D2': + slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2' + slice_endpoint_id.endpoint_uuid.uuid = '2/1' + + local_slice_reply = slice_client.CreateSlice(local_slice_request) + if local_slice_reply != local_slice_request.slice_id: # pylint: disable=no-member + raise Exception('Local Slice creation failed. 
Wrong Slice Id was returned') + + subslice_id = reply.slice_subslice_ids.add() + subslice_id.context_id.context_uuid.uuid = local_slice_request.slice_id.context_id.context_uuid.uuid + subslice_id.slice_uuid.uuid = local_slice_request.slice_id.slice_uuid.uuid + + context_client.SetSlice(reply) + return reply.slice_id + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def Authenticate(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult: + auth_result = AuthenticationResult() + auth_result.context_id.CopyFrom(request.context_id) # pylint: disable=no-member + auth_result.authenticated = True + return auth_result + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def LookUpSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: + try: + context_client = ContextClient() + slice_ = context_client.GetSlice(request.slice_id) + return slice_.slice_id + except grpc.RpcError: + #LOGGER.exception('Unable to get slice({:s})'.format(grpc_message_to_json_string(request.slice_id))) + return SliceId() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def OrderSliceFromCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: + raise NotImplementedError('OrderSliceFromCatalog') + #return Slice() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def CreateSliceAndAddToCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: + context_client = ContextClient() + slice_client = SliceClient() + reply = slice_client.CreateSlice(request) + if reply != request.slice_id: # pylint: disable=no-member + raise Exception('Slice creation failed. 
Wrong Slice Id was returned') + return context_client.GetSlice(request.slice_id) diff --git a/src/interdomain/service/topology_abstractor/AbstractDevice.py b/src/interdomain/service/topology_abstractor/AbstractDevice.py new file mode 100644 index 0000000000000000000000000000000000000000..3448c1036d4ef086d679d5f4308ae95decfbffa7 --- /dev/null +++ b/src/interdomain/service/topology_abstractor/AbstractDevice.py @@ -0,0 +1,190 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import copy, logging
from typing import Dict, Optional
from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID
from common.DeviceTypes import DeviceTypeEnum
from common.proto.context_pb2 import (
    ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, EndPoint)
from common.tools.context_queries.CheckType import (
    device_type_is_datacenter, device_type_is_network, endpoint_type_is_border)
from common.tools.context_queries.Device import add_device_to_topology, get_existing_device_uuids
from common.tools.object_factory.Context import json_context_id
from common.tools.object_factory.Device import json_device, json_device_id
from context.client.ContextClient import ContextClient

LOGGER = logging.getLogger(__name__)

class AbstractDevice:
    """Maintains an abstract device in the Context component.

    An abstract device aggregates the border endpoints of the real devices of a
    domain into a single Device record that is exposed through the interdomain
    topology. Internal mappings track which real (device_uuid, endpoint_uuid)
    pair each abstract endpoint corresponds to.
    """

    def __init__(self, device_uuid : str, device_type : DeviceTypeEnum):
        self.__context_client = ContextClient()
        self.__device_uuid : str = device_uuid
        self.__device_type : DeviceTypeEnum = device_type
        self.__device : Optional[Device] = None
        self.__device_id : Optional[DeviceId] = None

        # Dict[device_uuid, Dict[endpoint_uuid, abstract EndPoint]]
        self.__device_endpoint_to_abstract : Dict[str, Dict[str, EndPoint]] = dict()

        # Dict[endpoint_uuid, device_uuid]
        self.__abstract_endpoint_to_device : Dict[str, str] = dict()

    @property
    def uuid(self) -> str: return self.__device_uuid

    @property
    def device_id(self) -> Optional[DeviceId]: return self.__device_id

    @property
    def device(self) -> Optional[Device]: return self.__device

    def get_endpoint(self, device_uuid : str, endpoint_uuid : str) -> Optional[EndPoint]:
        """Return the abstract EndPoint mapped to a real (device, endpoint) pair, or None."""
        return self.__device_endpoint_to_abstract.get(device_uuid, {}).get(endpoint_uuid)

    def initialize(self) -> bool:
        """Create or load the abstract device in Context.

        Returns True when initialization work was performed, False when the
        object was already initialized.
        """
        if self.__device is not None: return False

        existing_device_uuids = get_existing_device_uuids(self.__context_client)
        create_abstract_device = self.__device_uuid not in existing_device_uuids

        if create_abstract_device:
            self._create_empty()
        else:
            self._load_existing()

        is_datacenter = device_type_is_datacenter(self.__device_type)
        is_network = device_type_is_network(self.__device_type)
        if is_datacenter or is_network:
            # Add abstract device to topologies [INTERDOMAIN_TOPOLOGY_UUID]
            context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
            topology_uuids = [INTERDOMAIN_TOPOLOGY_UUID]
            for topology_uuid in topology_uuids:
                add_device_to_topology(self.__context_client, context_id, topology_uuid, self.__device_uuid)

        # seems not needed; to be removed in future releases
        #if is_datacenter and create_abstract_device:
        #    dc_device = self.__context_client.GetDevice(DeviceId(**json_device_id(self.__device_uuid)))
        #    if device_type_is_datacenter(dc_device.device_type):
        #        self.update_endpoints(dc_device)
        #elif is_network:
        #    devices_in_admin_topology = get_devices_in_topology(
        #        self.__context_client, context_id, DEFAULT_TOPOLOGY_UUID)
        #    for device in devices_in_admin_topology:
        #        if device_type_is_datacenter(device.device_type): continue
        #        self.update_endpoints(device)

        return True

    def _create_empty(self) -> None:
        """Create a fresh, endpoint-less abstract device and store it in Context."""
        device_uuid = self.__device_uuid

        device = Device(**json_device(
            device_uuid, self.__device_type.value, DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED,
            endpoints=[], config_rules=[], drivers=[DeviceDriverEnum.DEVICEDRIVER_UNDEFINED]
        ))
        self.__context_client.SetDevice(device)
        self.__device = device
        self.__device_id = self.__device.device_id

    def _load_existing(self) -> None:
        """Load an existing abstract device from Context and rebuild the mappings."""
        self.__device_endpoint_to_abstract = dict()
        self.__abstract_endpoint_to_device = dict()

        self.__device_id = DeviceId(**json_device_id(self.__device_uuid))
        self.__device = self.__context_client.GetDevice(self.__device_id)
        self.__device_type = self.__device.device_type
        device_uuid = self.__device_id.device_uuid.uuid

        device_type = self.__device_type
        is_datacenter = device_type_is_datacenter(device_type)
        is_network = device_type_is_network(device_type)
        if not is_datacenter and not is_network:
            LOGGER.warning('Unsupported InterDomain Device Type: {:s}'.format(str(device_type)))
            return

        # for each endpoint in abstract device, populate internal data structures and mappings
        for interdomain_endpoint in self.__device.device_endpoints:
            endpoint_uuid : str = interdomain_endpoint.endpoint_id.endpoint_uuid.uuid

            if is_network:
                # NOTE(review): for network-type abstract devices, the endpoint uuid
                # appears to encode '<endpoint_uuid>@<real_device_uuid>' — TODO confirm
                endpoint_uuid,device_uuid = endpoint_uuid.split('@', maxsplit=1)

            self.__device_endpoint_to_abstract\
                .setdefault(device_uuid, {}).setdefault(endpoint_uuid, interdomain_endpoint)
            self.__abstract_endpoint_to_device\
                .setdefault(endpoint_uuid, device_uuid)

    def _update_endpoint_type(self, device_uuid : str, endpoint_uuid : str, endpoint_type : str) -> bool:
        """Update the type of an existing abstract endpoint.

        Returns True when the type was changed, False when it was already up to
        date or the endpoint is unknown.
        """
        device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {})
        interdomain_endpoint = device_endpoint_to_abstract.get(endpoint_uuid)
        # BUGFIX: guard against an unknown endpoint; the original dereferenced
        # 'interdomain_endpoint.endpoint_type' and crashed when the lookup missed.
        if interdomain_endpoint is None: return False
        interdomain_endpoint_type = interdomain_endpoint.endpoint_type
        if endpoint_type == interdomain_endpoint_type: return False
        interdomain_endpoint.endpoint_type = endpoint_type
        return True

    def _add_endpoint(self, device_uuid : str, endpoint_uuid : str, endpoint_type : str) -> EndPoint:
        """Add a new abstract endpoint mapped to a real (device, endpoint) pair."""
        interdomain_endpoint = self.__device.device_endpoints.add()
        interdomain_endpoint.endpoint_id.device_id.CopyFrom(self.__device_id)
        interdomain_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid
        interdomain_endpoint.endpoint_type = endpoint_type

        self.__device_endpoint_to_abstract\
            .setdefault(device_uuid, {}).setdefault(endpoint_uuid, interdomain_endpoint)
        self.__abstract_endpoint_to_device\
            .setdefault(endpoint_uuid, device_uuid)

        return interdomain_endpoint

    def _remove_endpoint(
        self, device_uuid : str, endpoint_uuid : str, interdomain_endpoint : EndPoint
    ) -> None:
        """Remove an abstract endpoint and drop it from the internal mappings."""
        self.__abstract_endpoint_to_device.pop(endpoint_uuid, None)
        device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {})
        device_endpoint_to_abstract.pop(endpoint_uuid, None)
        self.__device.device_endpoints.remove(interdomain_endpoint)

    def update_endpoints(self, device : Device) -> bool:
        """Synchronize the abstract endpoints with the border endpoints of a real device.

        Returns True when the abstract device was modified.
        """
        if device_type_is_datacenter(self.__device.device_type): return False

        device_uuid = device.device_id.device_uuid.uuid
        device_border_endpoint_uuids = {
            endpoint.endpoint_id.endpoint_uuid.uuid : endpoint.endpoint_type
            for endpoint in device.device_endpoints
            if endpoint_type_is_border(endpoint.endpoint_type)
        }

        updated = False

        # for each border endpoint in abstract device that is not in device; remove from abstract device
        device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {})
        _device_endpoint_to_abstract = copy.deepcopy(device_endpoint_to_abstract)
        for endpoint_uuid, interdomain_endpoint in _device_endpoint_to_abstract.items():
            if endpoint_uuid in device_border_endpoint_uuids: continue
            # remove interdomain endpoint that is not in device
            self._remove_endpoint(device_uuid, endpoint_uuid, interdomain_endpoint)
            updated = True

        # for each border endpoint in device that is not in abstract device; add to abstract device
        for endpoint_uuid,endpoint_type in device_border_endpoint_uuids.items():
            # if already added; just check endpoint type is not modified
            if endpoint_uuid in self.__abstract_endpoint_to_device:
                # BUGFIX: the original 'updated = updated or self._update_endpoint_type(...)'
                # short-circuited: once any change had set updated=True, endpoint-type
                # updates were silently skipped. Call unconditionally, then combine.
                endpoint_type_changed = self._update_endpoint_type(device_uuid, endpoint_uuid, endpoint_type)
                updated = updated or endpoint_type_changed
                continue

            # otherwise, add it to the abstract device
            self._add_endpoint(device_uuid, endpoint_uuid, endpoint_type)
            updated = True

        return updated
import copy, logging
from typing import Dict, List, Optional, Tuple
from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID
from common.proto.context_pb2 import ContextId, EndPointId, Link, LinkId
from common.tools.context_queries.Link import add_link_to_topology, get_existing_link_uuids
from common.tools.object_factory.Context import json_context_id
from common.tools.object_factory.Link import json_link, json_link_id
from context.client.ContextClient import ContextClient

LOGGER = logging.getLogger(__name__)

class AbstractLink:
    """Maintains an abstract link in the Context component.

    An abstract link connects two abstract-device border endpoints in the
    interdomain topology. The internal mapping is keyed by the
    (device_uuid, endpoint_uuid) tuple of each endpoint.
    """

    def __init__(self, link_uuid : str):
        self.__context_client = ContextClient()
        self.__link_uuid : str = link_uuid
        self.__link : Optional[Link] = None
        self.__link_id : Optional[LinkId] = None

        # Dict[(device_uuid, endpoint_uuid), abstract EndPointId]
        self.__device_endpoint_to_abstract : Dict[Tuple[str, str], EndPointId] = dict()

    @property
    def uuid(self) -> str: return self.__link_uuid

    @property
    def link_id(self) -> Optional[LinkId]: return self.__link_id

    @property
    def link(self) -> Optional[Link]: return self.__link

    @staticmethod
    def compose_uuid(
        device_uuid_a : str, endpoint_uuid_a : str, device_uuid_z : str, endpoint_uuid_z : str
    ) -> str:
        """Compose a deterministic link uuid from its two endpoints.

        Endpoints are sorted lexicographically so both orientations of the same
        link produce the same uuid, preventing duplicates.
        """
        link_endpoint_uuids = sorted([
            (device_uuid_a, endpoint_uuid_a),
            (device_uuid_z, endpoint_uuid_z)
        ])
        link_uuid = '{:s}/{:s}=={:s}/{:s}'.format(
            link_endpoint_uuids[0][0], link_endpoint_uuids[0][1],
            link_endpoint_uuids[1][0], link_endpoint_uuids[1][1])
        return link_uuid

    def initialize(self) -> bool:
        """Create or load the abstract link in Context.

        Returns True when the link had to be created, False otherwise.
        """
        if self.__link is not None: return False

        existing_link_uuids = get_existing_link_uuids(self.__context_client)

        create = self.__link_uuid not in existing_link_uuids
        if create:
            self._create_empty()
        else:
            self._load_existing()

        # Add abstract link to topologies [INTERDOMAIN_TOPOLOGY_UUID]
        context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
        topology_uuids = [INTERDOMAIN_TOPOLOGY_UUID]
        for topology_uuid in topology_uuids:
            add_link_to_topology(self.__context_client, context_id, topology_uuid, self.__link_uuid)

        return create

    def _create_empty(self) -> None:
        """Create a fresh, endpoint-less abstract link and store it in Context."""
        link = Link(**json_link(self.__link_uuid, endpoint_ids=[]))
        self.__context_client.SetLink(link)
        self.__link = link
        self.__link_id = self.__link.link_id

    def _load_existing(self) -> None:
        """Load an existing abstract link from Context and rebuild the mapping."""
        self.__link_id = LinkId(**json_link_id(self.__link_uuid))
        self.__link = self.__context_client.GetLink(self.__link_id)

        self.__device_endpoint_to_abstract = dict()

        # for each endpoint in abstract link, populate internal data structures and mappings
        for endpoint_id in self.__link.link_endpoint_ids:
            device_uuid : str = endpoint_id.device_id.device_uuid.uuid
            endpoint_uuid : str = endpoint_id.endpoint_uuid.uuid
            self.__device_endpoint_to_abstract.setdefault((device_uuid, endpoint_uuid), endpoint_id)

    def _add_endpoint(self, device_uuid : str, endpoint_uuid : str) -> None:
        """Append an endpoint to the abstract link and register it in the mapping."""
        endpoint_id = self.__link.link_endpoint_ids.add()
        endpoint_id.device_id.device_uuid.uuid = device_uuid
        endpoint_id.endpoint_uuid.uuid = endpoint_uuid
        self.__device_endpoint_to_abstract.setdefault((device_uuid, endpoint_uuid), endpoint_id)

    def _remove_endpoint(self, device_uuid : str, endpoint_uuid : str) -> None:
        """Drop an endpoint from the abstract link and the mapping, if present."""
        # BUGFIX: __device_endpoint_to_abstract is keyed by (device_uuid, endpoint_uuid)
        # tuples (see _add_endpoint/_load_existing). The original code did
        # '.get(device_uuid, {})' as if it were a nested dict, always obtained an empty
        # dict, and therefore never removed anything from the link.
        endpoint_id = self.__device_endpoint_to_abstract.pop((device_uuid, endpoint_uuid), None)
        if endpoint_id is not None: self.__link.link_endpoint_ids.remove(endpoint_id)

    def update_endpoints(self, link_endpoint_uuids : Optional[List[Tuple[str, str]]] = None) -> bool:
        """Synchronize the abstract link endpoints with the given endpoint list.

        Returns True when the abstract link was modified.
        """
        # BUGFIX: replaced the mutable default argument '= []' with None; a shared
        # default list is a latent aliasing hazard. Behavior for callers is unchanged.
        if link_endpoint_uuids is None: link_endpoint_uuids = []
        updated = False

        # for each endpoint in abstract link that is not in link; remove from abstract link
        device_endpoint_to_abstract = copy.deepcopy(self.__device_endpoint_to_abstract)
        for device_uuid, endpoint_uuid in device_endpoint_to_abstract.keys():
            if (device_uuid, endpoint_uuid) in link_endpoint_uuids: continue
            # remove endpoint_id that is not in link
            self._remove_endpoint(device_uuid, endpoint_uuid)
            updated = True

        # for each endpoint in link that is not in abstract link; add to abstract link
        for device_uuid, endpoint_uuid in link_endpoint_uuids:
            # if already added; nothing to do
            if (device_uuid, endpoint_uuid) in self.__device_endpoint_to_abstract: continue
            # otherwise, add it to the abstract link
            self._add_endpoint(device_uuid, endpoint_uuid)
            updated = True

        return updated
import logging
from typing import Dict, List, Tuple
from common.proto.context_pb2 import Device, Link, Service, Slice, TopologyId
from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId
from context.client.ContextClient import ContextClient
from dlt.connector.client.DltConnectorClient import DltConnectorClient
from .Types import DltRecordTypes

LOGGER = logging.getLogger(__name__)

class DltRecordSender:
    """Accumulates context records (devices, links, services, slices) and, on
    commit(), stores each in Context and notifies the DLT connector, preserving
    insertion order and de-duplicating by record uuid.
    """

    def __init__(self, context_client : ContextClient, dlt_connector_client : DltConnectorClient) -> None:
        self.context_client = context_client
        self.dlt_connector_client = dlt_connector_client
        # Insertion-ordered record uuids plus their associated payloads.
        self.dlt_record_uuids : List[str] = list()
        self.dlt_record_uuid_to_data : Dict[str, Tuple[TopologyId, DltRecordTypes]] = dict()

    def _add_record(self, record_uuid : str, data : Tuple[TopologyId, DltRecordTypes]) -> None:
        """Register a (topology_id, record) pair once; duplicates are ignored."""
        if record_uuid not in self.dlt_record_uuid_to_data:
            self.dlt_record_uuid_to_data[record_uuid] = data
            self.dlt_record_uuids.append(record_uuid)

    def add_device(self, topology_id : TopologyId, device : Device) -> None:
        """Queue a device record for the given topology."""
        record_uuid = '{:s}:device:{:s}'.format(
            topology_id.topology_uuid.uuid, device.device_id.device_uuid.uuid)
        self._add_record(record_uuid, (topology_id, device))

    def add_link(self, topology_id : TopologyId, link : Link) -> None:
        """Queue a link record for the given topology."""
        record_uuid = '{:s}:link:{:s}'.format(
            topology_id.topology_uuid.uuid, link.link_id.link_uuid.uuid)
        self._add_record(record_uuid, (topology_id, link))

    def add_service(self, topology_id : TopologyId, service : Service) -> None:
        """Queue a service record for the given topology."""
        service_id = service.service_id
        record_uuid = '{:s}:service:{:s}/{:s}'.format(
            topology_id.topology_uuid.uuid, service_id.context_id.context_uuid.uuid,
            service_id.service_uuid.uuid)
        self._add_record(record_uuid, (topology_id, service))

    def add_slice(self, topology_id : TopologyId, slice_ : Slice) -> None:
        """Queue a slice record for the given topology."""
        slice_id = slice_.slice_id
        record_uuid = '{:s}:slice:{:s}/{:s}'.format(
            topology_id.topology_uuid.uuid, slice_id.context_id.context_uuid.uuid,
            slice_id.slice_uuid.uuid)
        self._add_record(record_uuid, (topology_id, slice_))

    def _commit_device(self, topology_id : TopologyId, device : Device) -> None:
        """Store a device in Context and record it through the DLT connector."""
        device_id = self.context_client.SetDevice(device)
        dlt_device_id = DltDeviceId()
        dlt_device_id.topology_id.CopyFrom(topology_id)  # pylint: disable=no-member
        dlt_device_id.device_id.CopyFrom(device_id)      # pylint: disable=no-member
        self.dlt_connector_client.RecordDevice(dlt_device_id)

    def _commit_link(self, topology_id : TopologyId, link : Link) -> None:
        """Store a link in Context and record it through the DLT connector."""
        link_id = self.context_client.SetLink(link)
        dlt_link_id = DltLinkId()
        dlt_link_id.topology_id.CopyFrom(topology_id)  # pylint: disable=no-member
        dlt_link_id.link_id.CopyFrom(link_id)          # pylint: disable=no-member
        self.dlt_connector_client.RecordLink(dlt_link_id)

    def _commit_service(self, topology_id : TopologyId, service : Service) -> None:
        """Store a service in Context and record it through the DLT connector."""
        service_id = self.context_client.SetService(service)
        dlt_service_id = DltServiceId()
        dlt_service_id.topology_id.CopyFrom(topology_id)  # pylint: disable=no-member
        dlt_service_id.service_id.CopyFrom(service_id)    # pylint: disable=no-member
        self.dlt_connector_client.RecordService(dlt_service_id)

    def _commit_slice(self, topology_id : TopologyId, slice_ : Slice) -> None:
        """Store a slice in Context and record it through the DLT connector."""
        slice_id = self.context_client.SetSlice(slice_)
        dlt_slice_id = DltSliceId()
        dlt_slice_id.topology_id.CopyFrom(topology_id)  # pylint: disable=no-member
        dlt_slice_id.slice_id.CopyFrom(slice_id)        # pylint: disable=no-member
        self.dlt_connector_client.RecordSlice(dlt_slice_id)

    def commit(self) -> None:
        """Flush all queued records in insertion order, dispatching by record type."""
        for record_uuid in self.dlt_record_uuids:
            topology_id, record = self.dlt_record_uuid_to_data[record_uuid]
            if isinstance(record, Device):
                self._commit_device(topology_id, record)
            elif isinstance(record, Link):
                self._commit_link(topology_id, record)
            elif isinstance(record, Service):
                self._commit_service(topology_id, record)
            elif isinstance(record, Slice):
                self._commit_slice(topology_id, record)
            else:
                LOGGER.error('Unsupported Record({:s})'.format(str(record)))
import logging, threading
from typing import Dict, Optional, Tuple
from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID
from common.DeviceTypes import DeviceTypeEnum
from common.proto.context_pb2 import (
    ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EndPoint, EndPointId, Link, LinkEvent, TopologyId,
    TopologyEvent)
from common.tools.context_queries.CheckType import (
    device_type_is_datacenter, device_type_is_network, endpoint_type_is_border)
from common.tools.context_queries.Context import create_context
from common.tools.context_queries.Device import get_devices_in_topology, get_uuids_of_devices_in_topology
from common.tools.context_queries.Link import get_links_in_topology
from common.tools.context_queries.Topology import create_missing_topologies
from common.tools.grpc.Tools import grpc_message_to_json_string
from common.tools.object_factory.Context import json_context_id
from common.tools.object_factory.Device import json_device_id
from common.tools.object_factory.Topology import json_topology_id
from context.client.ContextClient import ContextClient
from context.client.EventsCollector import EventsCollector
from dlt.connector.client.DltConnectorClient import DltConnectorClient
from .AbstractDevice import AbstractDevice
from .AbstractLink import AbstractLink
from .DltRecordSender import DltRecordSender
from .Types import EventTypes

LOGGER = logging.getLogger(__name__)

# Identifiers of the administrative context and the interdomain topology the
# abstraction is published into.
ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
INTERDOMAIN_TOPOLOGY_ID = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_UUID, context_id=ADMIN_CONTEXT_ID))

class TopologyAbstractor(threading.Thread):
    """Background thread that listens to Context events and maintains an
    abstracted interdomain view (abstract devices and abstract links),
    propagating changes to the DLT connector through a DltRecordSender.
    """

    def __init__(self) -> None:
        super().__init__(daemon=True)
        self.terminate = threading.Event()  # set by stop() to end the run() loop

        self.context_client = ContextClient()
        self.dlt_connector_client = DltConnectorClient()
        self.context_event_collector = EventsCollector(self.context_client)

        # real device uuid -> abstract device uuid; real link uuid -> abstract link uuid
        self.real_to_abstract_device_uuid : Dict[str, str] = dict()
        self.real_to_abstract_link_uuid : Dict[str, str] = dict()

        # abstract device/link uuid -> TopologyId it was published into
        self.abstract_device_to_topology_id : Dict[str, TopologyId] = dict()
        self.abstract_link_to_topology_id : Dict[str, TopologyId] = dict()

        # caches of the abstraction objects themselves
        self.abstract_devices : Dict[str, AbstractDevice] = dict()
        self.abstract_links : Dict[Tuple[str,str], AbstractLink] = dict()

    def stop(self):
        # Signal the run() loop to terminate; the thread exits after the next poll.
        self.terminate.set()

    def run(self) -> None:
        # Ensure the admin context and the required topologies exist before
        # processing any event.
        self.context_client.connect()
        create_context(self.context_client, DEFAULT_CONTEXT_UUID)
        topology_uuids = [DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID]
        create_missing_topologies(self.context_client, ADMIN_CONTEXT_ID, topology_uuids)

        self.dlt_connector_client.connect()
        self.context_event_collector.start()

        # Poll for Context events until stop() is called; each event updates the
        # abstraction incrementally.
        while not self.terminate.is_set():
            event = self.context_event_collector.get_event(timeout=0.1)
            if event is None: continue
            #if self.ignore_event(event): continue
            LOGGER.info('Processing Event({:s})...'.format(grpc_message_to_json_string(event)))
            self.update_abstraction(event)

        self.context_event_collector.stop()
        self.context_client.close()
        self.dlt_connector_client.close()

    #def ignore_event(self, event : EventTypes) -> List[DltRecordIdTypes]:
    #    # TODO: filter events resulting from abstraction computation
    #    # TODO: filter events resulting from updating remote abstractions
    #    if self.own_context_id is None: return False
    #    own_context_uuid = self.own_context_id.context_uuid.uuid
    #
    #    if isinstance(event, ContextEvent):
    #        context_uuid = event.context_id.context_uuid.uuid
    #        return context_uuid == own_context_uuid
    #    elif isinstance(event, TopologyEvent):
    #        context_uuid = event.topology_id.context_id.context_uuid.uuid
    #        if context_uuid != own_context_uuid: return True
    #        topology_uuid = event.topology_id.topology_uuid.uuid
    #        if topology_uuid in {INTERDOMAIN_TOPOLOGY_UUID}: return True
    #
    #    return False

    def _get_or_create_abstract_device(
        self, device_uuid : str, device_type : DeviceTypeEnum, dlt_record_sender : DltRecordSender,
        abstract_topology_id : TopologyId
    ) -> AbstractDevice:
        # Return the cached AbstractDevice for device_uuid, creating and
        # initializing it (and queueing a DLT record) on first use.
        abstract_device = self.abstract_devices.get(device_uuid)
        changed = False
        if abstract_device is None:
            abstract_device = AbstractDevice(device_uuid, device_type)
            changed = abstract_device.initialize()
            if changed: dlt_record_sender.add_device(abstract_topology_id, abstract_device.device)
            self.abstract_devices[device_uuid] = abstract_device
            self.abstract_device_to_topology_id[device_uuid] = abstract_topology_id
        return abstract_device

    def _update_abstract_device(
        self, device : Device, dlt_record_sender : DltRecordSender, abstract_topology_id : TopologyId,
        abstract_device_uuid : Optional[str] = None
    ) -> None:
        # Map a real device onto its abstract counterpart and sync its border
        # endpoints. Datacenters become their own abstract device; other device
        # types are folded into the NETWORK abstract device identified by
        # abstract_device_uuid.
        device_uuid = device.device_id.device_uuid.uuid
        if device_type_is_datacenter(device.device_type):
            abstract_device_uuid = device_uuid
            abstract_device = self._get_or_create_abstract_device(
                device_uuid, DeviceTypeEnum.EMULATED_DATACENTER, dlt_record_sender, abstract_topology_id)
        elif device_type_is_network(device.device_type):
            LOGGER.warning('device_type is network; not implemented')
            return
        else:
            abstract_device = self._get_or_create_abstract_device(
                abstract_device_uuid, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id)
        self.real_to_abstract_device_uuid[device_uuid] = abstract_device_uuid
        changed = abstract_device.update_endpoints(device)
        if changed: dlt_record_sender.add_device(abstract_topology_id, abstract_device.device)

    def _get_or_create_abstract_link(
        self, link_uuid : str, dlt_record_sender : DltRecordSender, abstract_topology_id : TopologyId
    ) -> AbstractLink:
        # Return the cached AbstractLink for link_uuid, creating and initializing
        # it (and queueing a DLT record) on first use.
        abstract_link = self.abstract_links.get(link_uuid)
        changed = False
        if abstract_link is None:
            abstract_link = AbstractLink(link_uuid)
            changed = abstract_link.initialize()
            if changed: dlt_record_sender.add_link(abstract_topology_id, abstract_link.link)
            self.abstract_links[link_uuid] = abstract_link
            self.abstract_link_to_topology_id[link_uuid] = abstract_topology_id
        return abstract_link

    def _get_link_endpoint_data(self, endpoint_id : EndPointId) -> Optional[Tuple[AbstractDevice, EndPoint]]:
        # Resolve a real endpoint to its (abstract device, abstract endpoint)
        # pair; returns None when any step of the mapping is missing.
        device_uuid : str = endpoint_id.device_id.device_uuid.uuid
        endpoint_uuid : str = endpoint_id.endpoint_uuid.uuid
        abstract_device_uuid = self.real_to_abstract_device_uuid.get(device_uuid)
        if abstract_device_uuid is None: return None
        abstract_device = self.abstract_devices.get(abstract_device_uuid)
        if abstract_device is None: return None
        endpoint = abstract_device.get_endpoint(device_uuid, endpoint_uuid)
        if endpoint is None: return None
        return abstract_device, endpoint

    # NOTE(review): despite the 'Optional[str]' annotation, this method actually
    # returns Optional[Tuple[link_uuid, sorted list of (device_uuid, endpoint_uuid)]].
    def _compute_abstract_link(self, link : Link) -> Optional[str]:
        # Derive the abstract link (uuid + endpoint pairs) for a real
        # point-to-point link whose two endpoints are both border endpoints of
        # known abstract devices; returns None otherwise.
        if len(link.link_endpoint_ids) != 2: return None

        link_endpoint_data_A = self._get_link_endpoint_data(link.link_endpoint_ids[0])
        if link_endpoint_data_A is None: return None
        abstract_device_A, endpoint_A = link_endpoint_data_A
        if not endpoint_type_is_border(endpoint_A.endpoint_type): return None

        link_endpoint_data_Z = self._get_link_endpoint_data(link.link_endpoint_ids[-1])
        if link_endpoint_data_Z is None: return None
        abstract_device_Z, endpoint_Z = link_endpoint_data_Z
        if not endpoint_type_is_border(endpoint_Z.endpoint_type): return None

        link_uuid = AbstractLink.compose_uuid(
            abstract_device_A.uuid, endpoint_A.endpoint_id.endpoint_uuid.uuid,
            abstract_device_Z.uuid, endpoint_Z.endpoint_id.endpoint_uuid.uuid
        )

        # sort endpoints lexicographically to prevent duplicities
        link_endpoint_uuids = sorted([
            (abstract_device_A.uuid, endpoint_A.endpoint_id.endpoint_uuid.uuid),
            (abstract_device_Z.uuid, endpoint_Z.endpoint_id.endpoint_uuid.uuid)
        ])

        return link_uuid, link_endpoint_uuids

    def _update_abstract_link(
        self, link : Link, dlt_record_sender : DltRecordSender, abstract_topology_id : TopologyId
    ) -> None:
        # Map a real link onto its abstract counterpart and sync its endpoints;
        # silently skipped when no abstract link can be derived.
        abstract_link_specs = self._compute_abstract_link(link)
        if abstract_link_specs is None: return
        abstract_link_uuid, link_endpoint_uuids = abstract_link_specs

        abstract_link = self._get_or_create_abstract_link(abstract_link_uuid, dlt_record_sender, abstract_topology_id)
        link_uuid = link.link_id.link_uuid.uuid
        self.real_to_abstract_link_uuid[link_uuid] = abstract_link_uuid
        changed = abstract_link.update_endpoints(link_endpoint_uuids)
        if changed: dlt_record_sender.add_link(abstract_topology_id, abstract_link.link)

    def _infer_abstract_links(self, device : Device, dlt_record_sender : DltRecordSender) -> None:
        # Infer interdomain abstract links from border endpoints.
        # NOTE(review): this assumes a border endpoint's uuid equals the uuid of
        # the remote domain's device it attaches to (endpoint_uuid is looked up in
        # interdomain_device_uuids and used as a device uuid) — TODO confirm.
        device_uuid = device.device_id.device_uuid.uuid

        interdomain_device_uuids = get_uuids_of_devices_in_topology(
            self.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID)

        for endpoint in device.device_endpoints:
            if not endpoint_type_is_border(endpoint.endpoint_type): continue
            endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid

            abstract_link_uuid = AbstractLink.compose_uuid(device_uuid, endpoint_uuid, endpoint_uuid, device_uuid)
            if abstract_link_uuid in self.abstract_links: continue

            if endpoint_uuid not in interdomain_device_uuids: continue
            remote_device = self.context_client.GetDevice(DeviceId(**json_device_id(endpoint_uuid)))
            remote_device_border_endpoint_uuids = {
                endpoint.endpoint_id.endpoint_uuid.uuid : endpoint.endpoint_type
                for endpoint in remote_device.device_endpoints
                if endpoint_type_is_border(endpoint.endpoint_type)
            }
            # only create the link when the remote device has a matching border
            # endpoint pointing back at this device
            if device_uuid not in remote_device_border_endpoint_uuids: continue

            link_endpoint_uuids = sorted([(device_uuid, endpoint_uuid), (endpoint_uuid, device_uuid)])

            abstract_link = self._get_or_create_abstract_link(
                abstract_link_uuid, dlt_record_sender, INTERDOMAIN_TOPOLOGY_ID)
            changed = abstract_link.update_endpoints(link_endpoint_uuids)
            if changed: dlt_record_sender.add_link(INTERDOMAIN_TOPOLOGY_ID, abstract_link.link)

    def update_abstraction(self, event : EventTypes) -> None:
        # Dispatch a Context event to the appropriate abstraction-update path,
        # accumulating all resulting records and committing them at the end.
        dlt_record_sender = DltRecordSender(self.context_client, self.dlt_connector_client)

        if isinstance(event, ContextEvent):
            LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event)))

        elif isinstance(event, TopologyEvent):
            topology_id = event.topology_id
            topology_uuid = topology_id.topology_uuid.uuid
            context_id = topology_id.context_id
            context_uuid = context_id.context_uuid.uuid
            # only domain topologies in the admin context are abstracted; the
            # default and interdomain topologies themselves are skipped
            topology_uuids = {DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID}
            if (context_uuid == DEFAULT_CONTEXT_UUID) and (topology_uuid not in topology_uuids):
                abstract_topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=ADMIN_CONTEXT_ID))
                self._get_or_create_abstract_device(
                    topology_uuid, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id)

                devices = get_devices_in_topology(self.context_client, context_id, topology_uuid)
                for device in devices:
                    self._update_abstract_device(
                        device, dlt_record_sender, abstract_topology_id, abstract_device_uuid=topology_uuid)

                links = get_links_in_topology(self.context_client, context_id, topology_uuid)
                for link in links:
                    self._update_abstract_link(link, dlt_record_sender, abstract_topology_id)

                for device in devices:
                    self._infer_abstract_links(device, dlt_record_sender)

            else:
                LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event)))

        elif isinstance(event, DeviceEvent):
            device_id = event.device_id
            device_uuid = device_id.device_uuid.uuid
            abstract_device_uuid = self.real_to_abstract_device_uuid.get(device_uuid)
            device = self.context_client.GetDevice(device_id)
            if abstract_device_uuid is None:
                # device not yet mapped to an abstract device; nothing to update
                LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event)))
            else:
                abstract_topology_id = self.abstract_device_to_topology_id[abstract_device_uuid]
                self._update_abstract_device(
                    device, dlt_record_sender, abstract_topology_id, abstract_device_uuid=abstract_device_uuid)

            self._infer_abstract_links(device, dlt_record_sender)

        elif isinstance(event, LinkEvent):
            link_id = event.link_id
            link_uuid = link_id.link_uuid.uuid
            abstract_link_uuid = self.real_to_abstract_link_uuid.get(link_uuid)
            if abstract_link_uuid is None:
                # link not yet mapped to an abstract link; nothing to update
                LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event)))
            else:
                abstract_topology_id = self.abstract_link_to_topology_id[abstract_link_uuid]
                link = self.context_client.GetLink(link_id)
                self._update_abstract_link(link, dlt_record_sender, abstract_topology_id)

        else:
            LOGGER.warning('Unsupported Event({:s})'.format(grpc_message_to_json_string(event)))

        dlt_record_sender.commit()
+ +from typing import Union +from common.proto.context_pb2 import ( + ConnectionEvent, ContextEvent, Device, DeviceEvent, DeviceId, Link, LinkEvent, LinkId, Service, ServiceEvent, + ServiceId, Slice, SliceEvent, SliceId, TopologyEvent) + +DltRecordIdTypes = Union[DeviceId, LinkId, SliceId, ServiceId] +DltRecordTypes = Union[Device, Link, Slice, Service] + +EventTypes = Union[ + ContextEvent, TopologyEvent, DeviceEvent, LinkEvent, ServiceEvent, SliceEvent, ConnectionEvent +] diff --git a/src/interdomain/service/topology_abstractor/__init__.py b/src/interdomain/service/topology_abstractor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/interdomain/service/topology_abstractor/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/monitoring/service/AlarmManager.py b/src/monitoring/service/AlarmManager.py index d80d815fe50395e360d0fc9e932168fa6e1a0174..873a65d2c8041e6378f84d979bb1fd98d4d61d6b 100644 --- a/src/monitoring/service/AlarmManager.py +++ b/src/monitoring/service/AlarmManager.py @@ -29,7 +29,7 @@ class AlarmManager(): trigger='interval', seconds=(subscription_frequency_ms/1000), start_date=start_date, end_date=end_date,timezone=pytz.utc, id=str(alarm_id)) LOGGER.debug(f"Alarm job {alarm_id} succesfully created") - job.remove() + #job.remove() def delete_alarm(self, alarm_id): try: diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py index c265d2c9df3b9823b6116309a27ca3270fdd2667..548f34c8a07a1d8df17f2702879dbbadf60f6d13 100644 --- a/src/monitoring/service/MonitoringServiceServicerImpl.py +++ b/src/monitoring/service/MonitoringServiceServicerImpl.py @@ -89,7 +89,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): kpi_connection_id = request.connection_id.connection_uuid.uuid - if request.kpi_id.kpi_id.uuid is not "": + if request.kpi_id.kpi_id.uuid != "": response.kpi_id.uuid = request.kpi_id.kpi_id.uuid # Here the code to modify an existing kpi else: @@ -424,7 +424,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): LOGGER.debug(f"request.AlarmID: {request.alarm_id.alarm_id.uuid}") - if request.alarm_id.alarm_id.uuid is not "": + if request.alarm_id.alarm_id.uuid != "": alarm_id = request.alarm_id.alarm_id.uuid # Here the code to modify an existing alarm else: @@ -596,26 +596,29 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): try: kpi_id = request.kpi_id.uuid response = Kpi() - if kpi_id is "": + if kpi_id == "": LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id))) response.kpi_id.kpi_id.uuid = "NoID" else: query = f"SELECT kpi_id, timestamp, kpi_value FROM monitoring WHERE kpi_id = '{kpi_id}' " \ 
f"LATEST ON timestamp PARTITION BY kpi_id" - data = self.metrics_db.run_query(query)[0] + data = self.metrics_db.run_query(query) LOGGER.debug(data) - - response.kpi_id.kpi_id.uuid = str(data[0]) - response.timestamp.timestamp = timestamp_string_to_float(data[1]) - response.kpi_value.floatVal = data[2] # This must be improved + if len(data) == 0: + response.kpi_id.kpi_id.uuid = request.kpi_id.uuid + else: + _data = data[0] + response.kpi_id.kpi_id.uuid = str(_data[0]) + response.timestamp.timestamp = timestamp_string_to_float(_data[1]) + response.kpi_value.floatVal = _data[2] return response except ServiceException as e: - LOGGER.exception('SetKpi exception') + LOGGER.exception('GetInstantKpi exception') # CREATEKPI_COUNTER_FAILED.inc() grpc_context.abort(e.code, e.details) except Exception as e: # pragma: no cover - LOGGER.exception('SetKpi exception') + LOGGER.exception('GetInstantKpi exception') # CREATEKPI_COUNTER_FAILED.inc() grpc_context.abort(grpc.StatusCode.INTERNAL, str(e)) diff --git a/src/monitoring/service/SubscriptionManager.py b/src/monitoring/service/SubscriptionManager.py index 6ff922c52dea10b0301ff5f765b045e125e42c05..3d1da36b7c5f66c28d3885a305660d6971f695b1 100644 --- a/src/monitoring/service/SubscriptionManager.py +++ b/src/monitoring/service/SubscriptionManager.py @@ -46,7 +46,7 @@ class SubscriptionManager(): trigger='interval', seconds=sampling_interval_s, start_date=start_date, end_date=end_date, timezone=pytz.utc, id=str(subscription_id)) LOGGER.debug(f"Subscrition job {subscription_id} succesfully created") - job.remove() + #job.remove() def delete_subscription(self, subscription_id): self.scheduler.remove_job(subscription_id) diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py index 1d55646abffcdb4a882167406ba046aca7bfa651..205306d0ec2d156a2050d1f95c5c1e990796e018 100644 --- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py +++ 
b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py @@ -13,11 +13,16 @@ # limitations under the License. import grpc, logging -from common.proto.context_pb2 import Empty +from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.proto.context_pb2 import ContextId, Empty from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest from common.proto.pathcomp_pb2_grpc import PathCompServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.tools.context_queries.Device import get_devices_in_topology +from common.tools.context_queries.Link import get_links_in_topology +from common.tools.context_queries.InterDomain import is_inter_domain from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from pathcomp.frontend.service.algorithms.Factory import get_algorithm @@ -27,6 +32,8 @@ SERVICE_NAME = 'PathComp' METHOD_NAMES = ['Compute'] METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) + class PathCompServiceServicerImpl(PathCompServiceServicer): def __init__(self) -> None: LOGGER.debug('Creating Servicer...') @@ -38,11 +45,18 @@ class PathCompServiceServicerImpl(PathCompServiceServicer): context_client = ContextClient() - # TODO: add filtering of devices and links - # TODO: add contexts, topologies, and membership of devices/links in topologies + if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids): + devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) + links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) + else: + # TODO: improve filtering of devices and links + # TODO: add contexts, topologies, and membership of devices/links 
in topologies + devices = context_client.ListDevices(Empty()) + links = context_client.ListLinks(Empty()) + algorithm = get_algorithm(request) - algorithm.add_devices(context_client.ListDevices(Empty())) - algorithm.add_links(context_client.ListLinks(Empty())) + algorithm.add_devices(devices) + algorithm.add_links(links) algorithm.add_service_requests(request) #LOGGER.debug('device_list = {:s}' .format(str(algorithm.device_list ))) diff --git a/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py index d5f937fd207807ba650669ea9fb2395b2e21b164..e0a2441823627843f1e14bde905da4f82ed7a593 100644 --- a/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py +++ b/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py @@ -12,15 +12,42 @@ # See the License for the specific language governing permissions and # limitations under the License. -from common.proto.pathcomp_pb2 import Algorithm_ShortestPath +from typing import Dict, Optional +from common.proto.pathcomp_pb2 import Algorithm_ShortestPath, PathCompRequest from ._Algorithm import _Algorithm class ShortestPathAlgorithm(_Algorithm): def __init__(self, algorithm : Algorithm_ShortestPath, class_name=__name__) -> None: super().__init__('SP', False, class_name=class_name) - def add_service_requests(self, requested_services) -> None: - super().add_service_requests(requested_services) + def add_service_requests(self, request : PathCompRequest) -> None: + super().add_service_requests(request) for service_request in self.service_list: service_request['algId' ] = self.algorithm_id service_request['syncPaths'] = self.sync_paths + + def _single_device_request(self) -> Optional[Dict]: + if len(self.service_list) != 1: return None + service = self.service_list[0] + endpoint_ids = service['service_endpoints_ids'] + if len(endpoint_ids) != 2: return None + if endpoint_ids[0]['device_id'] != endpoint_ids[-1]['device_id']: return None 
+ return {'response-list': [{ + 'serviceId': service['serviceId'], + 'service_endpoints_ids': [endpoint_ids[0], endpoint_ids[-1]], + 'path': [{ + # not used by now + #'path-capacity': {'total-size': {'value': 200, 'unit': 0}}, + #'path-latency': {'fixed-latency-characteristic': '2.000000'}, + #'path-cost': {'cost-name': '', 'cost-value': '1.000000', 'cost-algorithm': '0.000000'}, + 'devices': [endpoint_ids[0], endpoint_ids[-1]] + }] + }]} + + def execute(self, dump_request_filename : Optional[str] = None, dump_reply_filename : Optional[str] = None) -> None: + # if request is composed of a single service with single device (not supported by backend), + # produce synthetic reply directly + self.json_reply = self._single_device_request() + if self.json_reply is None: + # otherwise, follow normal logic through the backend + return super().execute(dump_request_filename, dump_reply_filename) diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index b798813a83d984d6d1d75450529e9c826e220624..3833642457bc5f8c2ba7b7d09f384a87dfabe41d 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -13,7 +13,7 @@ # limitations under the License. 
import json, logging, requests -from typing import Dict, List, Optional, Tuple +from typing import Dict, List, Optional, Tuple, Union from common.proto.context_pb2 import ( ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum) @@ -23,7 +23,8 @@ from pathcomp.frontend.Config import BACKEND_URL from pathcomp.frontend.service.algorithms.tools.ConstantsMappings import DEVICE_LAYER_TO_SERVICE_TYPE, DeviceLayerEnum from .tools.EroPathToHops import eropath_to_hops from .tools.ComposeRequest import compose_device, compose_link, compose_service -from .tools.ComputeSubServices import convert_explicit_path_hops_to_connections +from .tools.ComputeSubServices import ( + convert_explicit_path_hops_to_connections, convert_explicit_path_hops_to_plain_connection) class _Algorithm: def __init__(self, algorithm_id : str, sync_paths : bool, class_name=__name__) -> None: @@ -46,8 +47,9 @@ class _Algorithm: self.service_list : List[Dict] = list() self.service_dict : Dict[Tuple[str, str], Tuple[Dict, Service]] = dict() - def add_devices(self, grpc_devices : DeviceList) -> None: - for grpc_device in grpc_devices.devices: + def add_devices(self, grpc_devices : Union[List[Device], DeviceList]) -> None: + if isinstance(grpc_devices, DeviceList): grpc_devices = grpc_devices.devices + for grpc_device in grpc_devices: json_device = compose_device(grpc_device) self.device_list.append(json_device) @@ -62,8 +64,9 @@ class _Algorithm: self.endpoint_dict[device_uuid] = device_endpoint_dict - def add_links(self, grpc_links : LinkList) -> None: - for grpc_link in grpc_links.links: + def add_links(self, grpc_links : Union[List[Link], LinkList]) -> None: + if isinstance(grpc_links, LinkList): grpc_links = grpc_links.links + for grpc_link in grpc_links: json_link = compose_link(grpc_link) self.link_list.append(json_link) @@ -134,7 +137,8 @@ class _Algorithm: def add_service_to_reply( self, reply : PathCompReply, context_uuid : str, 
service_uuid : str, - device_layer : Optional[DeviceLayerEnum] = None, path_hops : List[Dict] = [] + device_layer : Optional[DeviceLayerEnum] = None, path_hops : List[Dict] = [], + config_rules : List = [] ) -> Service: # TODO: implement support for multi-point services # Control deactivated to enable disjoint paths with multiple redundant endpoints on each side @@ -168,6 +172,8 @@ class _Algorithm: } config_rule = ConfigRule(**json_config_rule_set('/settings', json_tapi_settings)) service.service_config.config_rules.append(config_rule) + else: + service.service_config.config_rules.extend(config_rules) service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED @@ -192,7 +198,8 @@ class _Algorithm: context_uuid = service_id['contextId'] service_uuid = service_id['service_uuid'] service_key = (context_uuid, service_uuid) - grpc_services[service_key] = self.add_service_to_reply(reply, context_uuid, service_uuid) + upper_service = self.add_service_to_reply(reply, context_uuid, service_uuid) + grpc_services[service_key] = upper_service no_path_issue = response.get('noPath', {}).get('issue') if no_path_issue is not None: @@ -202,15 +209,22 @@ class _Algorithm: for service_path_ero in response['path']: path_hops = eropath_to_hops(service_path_ero['devices'], self.endpoint_to_link_dict) - connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid) + try: + connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid) + except: # pylint: disable=bare-except + # if not able to extrapolate sub-services and sub-connections, + # assume single service and single connection + connections = convert_explicit_path_hops_to_plain_connection(path_hops, service_uuid) for connection in connections: connection_uuid,device_layer,path_hops,_ = connection service_key = (context_uuid, connection_uuid) grpc_service = grpc_services.get(service_key) if grpc_service is None: + config_rules = 
upper_service.service_config.config_rules grpc_service = self.add_service_to_reply( - reply, context_uuid, connection_uuid, device_layer=device_layer, path_hops=path_hops) + reply, context_uuid, connection_uuid, device_layer=device_layer, path_hops=path_hops, + config_rules=config_rules) grpc_services[service_key] = grpc_service for connection in connections: diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py index c1977cedb9b341fbb767a5fb8c829cd5f633884c..17a7e74ef573e4926d53045ab8888c71a3dd73d7 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py @@ -34,9 +34,11 @@ def compose_topology_id(topology_id : TopologyId) -> Dict: return {'contextId': context_uuid, 'topology_uuid': topology_uuid} def compose_service_id(service_id : ServiceId) -> Dict: - context_uuid = service_id.context_id.context_uuid.uuid - - if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_UUID + # force context_uuid to be always DEFAULT_CONTEXT_UUID for simplicity + # for interdomain contexts are managed in a particular way + #context_uuid = service_id.context_id.context_uuid.uuid + #if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_UUID + context_uuid = DEFAULT_CONTEXT_UUID service_uuid = service_id.service_uuid.uuid return {'contextId': context_uuid, 'service_uuid': service_uuid} diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py index f2c66cb24ca3c15c71f22dbe4eeca634e18d985a..7c7b62e2d039d2e6bad979b3601e09ca1c54ea51 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py @@ -94,3 +94,19 @@ def convert_explicit_path_hops_to_connections( connections.append(connection_stack.get()) assert 
connection_stack.empty() return connections + +def convert_explicit_path_hops_to_plain_connection( + path_hops : List[Dict], main_connection_uuid : str +) -> List[Tuple[str, DeviceLayerEnum, List[str], List[str]]]: + + connection : Tuple[str, DeviceLayerEnum, List[str], List[str]] = \ + (main_connection_uuid, DeviceLayerEnum.PACKET_DEVICE, [], []) + + last_device_uuid = None + for path_hop in path_hops: + device_uuid = path_hop['device'] + if last_device_uuid == device_uuid: continue + connection[2].append(path_hop) + last_device_uuid = device_uuid + + return [connection] diff --git a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py index 8561ab110ad09b52c3040063241c0cc90dbbb223..56e11b1b4a0293bcdbed2f1d3cd7c08814d7b161 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py @@ -78,29 +78,38 @@ class DeviceLayerEnum(IntEnum): OPTICAL_DEVICE = 0 # Layer 0 domain device DEVICE_TYPE_TO_LAYER = { - DeviceTypeEnum.EMULATED_DATACENTER.value : DeviceLayerEnum.APPLICATION_DEVICE, - DeviceTypeEnum.DATACENTER.value : DeviceLayerEnum.APPLICATION_DEVICE, + DeviceTypeEnum.EMULATED_DATACENTER.value : DeviceLayerEnum.APPLICATION_DEVICE, + DeviceTypeEnum.DATACENTER.value : DeviceLayerEnum.APPLICATION_DEVICE, + DeviceTypeEnum.NETWORK.value : DeviceLayerEnum.APPLICATION_DEVICE, - DeviceTypeEnum.EMULATED_PACKET_ROUTER.value : DeviceLayerEnum.PACKET_DEVICE, - DeviceTypeEnum.PACKET_ROUTER.value : DeviceLayerEnum.PACKET_DEVICE, - DeviceTypeEnum.EMULATED_PACKET_SWITCH.value : DeviceLayerEnum.MAC_LAYER_DEVICE, - DeviceTypeEnum.PACKET_SWITCH.value : DeviceLayerEnum.MAC_LAYER_DEVICE, - DeviceTypeEnum.P4_SWITCH.value : DeviceLayerEnum.MAC_LAYER_DEVICE, + DeviceTypeEnum.EMULATED_PACKET_ROUTER.value : DeviceLayerEnum.PACKET_DEVICE, + DeviceTypeEnum.PACKET_ROUTER.value : DeviceLayerEnum.PACKET_DEVICE, + 
DeviceTypeEnum.EMULATED_PACKET_SWITCH.value : DeviceLayerEnum.MAC_LAYER_DEVICE, + DeviceTypeEnum.PACKET_SWITCH.value : DeviceLayerEnum.MAC_LAYER_DEVICE, - DeviceTypeEnum.MICROVAWE_RADIO_SYSTEM.value : DeviceLayerEnum.MAC_LAYER_CONTROLLER, + DeviceTypeEnum.EMULATED_P4_SWITCH.value : DeviceLayerEnum.MAC_LAYER_DEVICE, + DeviceTypeEnum.P4_SWITCH.value : DeviceLayerEnum.MAC_LAYER_DEVICE, - DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value: DeviceLayerEnum.OPTICAL_CONTROLLER, - DeviceTypeEnum.OPEN_LINE_SYSTEM.value : DeviceLayerEnum.OPTICAL_CONTROLLER, + DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : DeviceLayerEnum.MAC_LAYER_CONTROLLER, + DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value : DeviceLayerEnum.MAC_LAYER_CONTROLLER, - DeviceTypeEnum.OPTICAL_ROADM.value : DeviceLayerEnum.OPTICAL_DEVICE, - DeviceTypeEnum.OPTICAL_TRANSPONDER.value : DeviceLayerEnum.OPTICAL_DEVICE, + DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value : DeviceLayerEnum.OPTICAL_CONTROLLER, + DeviceTypeEnum.OPEN_LINE_SYSTEM.value : DeviceLayerEnum.OPTICAL_CONTROLLER, + DeviceTypeEnum.XR_CONSTELLATION.value : DeviceLayerEnum.OPTICAL_CONTROLLER, + + DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value : DeviceLayerEnum.OPTICAL_DEVICE, + DeviceTypeEnum.OPTICAL_ROADM.value : DeviceLayerEnum.OPTICAL_DEVICE, + DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER.value : DeviceLayerEnum.OPTICAL_DEVICE, + DeviceTypeEnum.OPTICAL_TRANSPONDER.value : DeviceLayerEnum.OPTICAL_DEVICE, } DEVICE_LAYER_TO_SERVICE_TYPE = { - DeviceLayerEnum.APPLICATION_DEVICE.value: ServiceTypeEnum.SERVICETYPE_L3NM, + DeviceLayerEnum.APPLICATION_DEVICE.value : ServiceTypeEnum.SERVICETYPE_L3NM, + DeviceLayerEnum.PACKET_DEVICE.value : ServiceTypeEnum.SERVICETYPE_L3NM, - DeviceLayerEnum.PACKET_DEVICE.value : ServiceTypeEnum.SERVICETYPE_L3NM, - DeviceLayerEnum.MAC_LAYER_DEVICE.value : ServiceTypeEnum.SERVICETYPE_L2NM, + DeviceLayerEnum.MAC_LAYER_CONTROLLER.value : ServiceTypeEnum.SERVICETYPE_L2NM, + DeviceLayerEnum.MAC_LAYER_DEVICE.value : 
ServiceTypeEnum.SERVICETYPE_L2NM, - DeviceLayerEnum.OPTICAL_CONTROLLER.value: ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, + DeviceLayerEnum.OPTICAL_CONTROLLER.value : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, + DeviceLayerEnum.OPTICAL_DEVICE.value : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, } diff --git a/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py b/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py index 021940937c23a7cb461a603aa32a15f16626eb1d..a885ddb29c3fa70d6bccea18f43fef5b038aae68 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py +++ b/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py @@ -42,35 +42,43 @@ # ] # +import logging from typing import Dict, List +LOGGER = logging.getLogger(__name__) + def eropath_to_hops(ero_path : List[Dict], endpoint_to_link_dict : Dict) -> List[Dict]: - path_hops = [] - for endpoint in ero_path: - device_uuid = endpoint['device_id'] - endpoint_uuid = endpoint['endpoint_uuid'] + try: + path_hops = [] + for endpoint in ero_path: + device_uuid = endpoint['device_id'] + endpoint_uuid = endpoint['endpoint_uuid'] - if len(path_hops) == 0: - path_hops.append({'device': device_uuid, 'ingress_ep': endpoint_uuid}) - continue + if len(path_hops) == 0: + path_hops.append({'device': device_uuid, 'ingress_ep': endpoint_uuid}) + continue - last_hop = path_hops[-1] - if (last_hop['device'] == device_uuid): - if ('ingress_ep' not in last_hop) or ('egress_ep' in last_hop): continue - last_hop['egress_ep'] = endpoint_uuid - continue + last_hop = path_hops[-1] + if (last_hop['device'] == device_uuid): + if ('ingress_ep' not in last_hop) or ('egress_ep' in last_hop): continue + last_hop['egress_ep'] = endpoint_uuid + continue - endpoint_key = (last_hop['device'], last_hop['egress_ep']) - link_tuple = endpoint_to_link_dict.get(endpoint_key) - ingress = next(iter([ - ep_id for ep_id in link_tuple[0]['link_endpoint_ids'] - if 
(ep_id['endpoint_id']['device_id'] == device_uuid) and\ - (ep_id['endpoint_id']['endpoint_uuid'] != endpoint_uuid) - ]), None) - if ingress['endpoint_id']['device_id'] != device_uuid: raise Exception('Malformed path') - path_hops.append({ - 'device': ingress['endpoint_id']['device_id'], - 'ingress_ep': ingress['endpoint_id']['endpoint_uuid'], - 'egress_ep': endpoint_uuid, - }) - return path_hops + endpoint_key = (last_hop['device'], last_hop['egress_ep']) + link_tuple = endpoint_to_link_dict.get(endpoint_key) + ingress = next(iter([ + ep_id for ep_id in link_tuple[0]['link_endpoint_ids'] + if (ep_id['endpoint_id']['device_id'] == device_uuid) and\ + (ep_id['endpoint_id']['endpoint_uuid'] != endpoint_uuid) + ]), None) + if ingress['endpoint_id']['device_id'] != device_uuid: raise Exception('Malformed path') + path_hops.append({ + 'device': ingress['endpoint_id']['device_id'], + 'ingress_ep': ingress['endpoint_id']['endpoint_uuid'], + 'egress_ep': endpoint_uuid, + }) + return path_hops + except: + LOGGER.exception('Unhandled exception: ero_path={:s} endpoint_to_link_dict={:s}'.format( + str(ero_path), str(endpoint_to_link_dict))) + raise diff --git a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java index 88ebd332c719f42e1345e3c0f7fbbb734cdf42ba..4c43f39006227065d33a2f9db496013f503695fd 100644 --- a/src/policy/src/main/java/eu/teraflow/policy/Serializer.java +++ b/src/policy/src/main/java/eu/teraflow/policy/Serializer.java @@ -2243,6 +2243,8 @@ public class Serializer { return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY; case ONF_TR_352: return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352; + case XR: + return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR; case UNDEFINED: default: return ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_UNDEFINED; @@ -2262,6 +2264,8 @@ public class Serializer { return DeviceDriverEnum.IETF_NETWORK_TOPOLOGY; case 
DEVICEDRIVER_ONF_TR_352: return DeviceDriverEnum.ONF_TR_352; + case DEVICEDRIVER_XR: + return DeviceDriverEnum.XR; case DEVICEDRIVER_UNDEFINED: case UNRECOGNIZED: default: diff --git a/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java b/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java index c98fc1fce545974a8067db2667ec1e519a058ae2..ee1ebcbcf96962d06be6915f40acfd8230483655 100644 --- a/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java +++ b/src/policy/src/main/java/eu/teraflow/policy/context/model/DeviceDriverEnum.java @@ -22,5 +22,6 @@ public enum DeviceDriverEnum { TRANSPORT_API, P4, IETF_NETWORK_TOPOLOGY, - ONF_TR_352 + ONF_TR_352, + XR } diff --git a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java index 0f27fe20c93fd133cf24c7f6c0d8ce5baa0b7d37..7e6967e0c22c2aa4b88d4b8a77c87f20023d3c2a 100644 --- a/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java +++ b/src/policy/src/test/java/eu/teraflow/policy/SerializerTest.java @@ -3599,6 +3599,7 @@ class SerializerTest { Arguments.of( DeviceDriverEnum.ONF_TR_352, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352), + Arguments.of(DeviceDriverEnum.XR, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_XR), Arguments.of( DeviceDriverEnum.UNDEFINED, ContextOuterClass.DeviceDriverEnum.DEVICEDRIVER_UNDEFINED)); } diff --git a/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java b/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java index 3c0d7ce36fcdc4e47697ba11a4ceb3d8e8cdea0c..fbbba62a2baa1c2fe2b3c3fe090883d6542996e4 100644 --- a/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java +++ b/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java @@ -173,6 +173,10 @@ public final class ContextOuterClass { * <code>DEVICEDRIVER_ONF_TR_352 = 5;</code> */ 
DEVICEDRIVER_ONF_TR_352(5), + /** + * <code>DEVICEDRIVER_XR = 6;</code> + */ + DEVICEDRIVER_XR(6), UNRECOGNIZED(-1), ; @@ -204,6 +208,10 @@ public final class ContextOuterClass { * <code>DEVICEDRIVER_ONF_TR_352 = 5;</code> */ public static final int DEVICEDRIVER_ONF_TR_352_VALUE = 5; + /** + * <code>DEVICEDRIVER_XR = 6;</code> + */ + public static final int DEVICEDRIVER_XR_VALUE = 6; public final int getNumber() { @@ -236,6 +244,7 @@ public final class ContextOuterClass { case 3: return DEVICEDRIVER_P4; case 4: return DEVICEDRIVER_IETF_NETWORK_TOPOLOGY; case 5: return DEVICEDRIVER_ONF_TR_352; + case 6: return DEVICEDRIVER_XR; default: return null; } } @@ -62318,100 +62327,100 @@ public final class ContextOuterClass { "ntext.ContextId\022\025\n\rauthenticated\030\002 \001(\010*j" + "\n\rEventTypeEnum\022\027\n\023EVENTTYPE_UNDEFINED\020\000" + "\022\024\n\020EVENTTYPE_CREATE\020\001\022\024\n\020EVENTTYPE_UPDA" + - "TE\020\002\022\024\n\020EVENTTYPE_REMOVE\020\003*\305\001\n\020DeviceDri" + + "TE\020\002\022\024\n\020EVENTTYPE_REMOVE\020\003*\332\001\n\020DeviceDri" + "verEnum\022\032\n\026DEVICEDRIVER_UNDEFINED\020\000\022\033\n\027D" + "EVICEDRIVER_OPENCONFIG\020\001\022\036\n\032DEVICEDRIVER" + "_TRANSPORT_API\020\002\022\023\n\017DEVICEDRIVER_P4\020\003\022&\n" + "\"DEVICEDRIVER_IETF_NETWORK_TOPOLOGY\020\004\022\033\n" + - "\027DEVICEDRIVER_ONF_TR_352\020\005*\217\001\n\033DeviceOpe" + - "rationalStatusEnum\022%\n!DEVICEOPERATIONALS" + - "TATUS_UNDEFINED\020\000\022$\n DEVICEOPERATIONALST" + - "ATUS_DISABLED\020\001\022#\n\037DEVICEOPERATIONALSTAT" + - "US_ENABLED\020\002*\201\001\n\017ServiceTypeEnum\022\027\n\023SERV" + - "ICETYPE_UNKNOWN\020\000\022\024\n\020SERVICETYPE_L3NM\020\001\022" + - "\024\n\020SERVICETYPE_L2NM\020\002\022)\n%SERVICETYPE_TAP" + - "I_CONNECTIVITY_SERVICE\020\003*\250\001\n\021ServiceStat" + - "usEnum\022\033\n\027SERVICESTATUS_UNDEFINED\020\000\022\031\n\025S" + - "ERVICESTATUS_PLANNED\020\001\022\030\n\024SERVICESTATUS_" + - 
"ACTIVE\020\002\022!\n\035SERVICESTATUS_PENDING_REMOVA" + - "L\020\003\022\036\n\032SERVICESTATUS_SLA_VIOLATED\020\004*\251\001\n\017" + - "SliceStatusEnum\022\031\n\025SLICESTATUS_UNDEFINED" + - "\020\000\022\027\n\023SLICESTATUS_PLANNED\020\001\022\024\n\020SLICESTAT" + - "US_INIT\020\002\022\026\n\022SLICESTATUS_ACTIVE\020\003\022\026\n\022SLI" + - "CESTATUS_DEINIT\020\004\022\034\n\030SLICESTATUS_SLA_VIO" + - "LATED\020\005*]\n\020ConfigActionEnum\022\032\n\026CONFIGACT" + - "ION_UNDEFINED\020\000\022\024\n\020CONFIGACTION_SET\020\001\022\027\n" + - "\023CONFIGACTION_DELETE\020\002*\203\002\n\022IsolationLeve" + - "lEnum\022\020\n\014NO_ISOLATION\020\000\022\026\n\022PHYSICAL_ISOL" + - "ATION\020\001\022\025\n\021LOGICAL_ISOLATION\020\002\022\025\n\021PROCES" + - "S_ISOLATION\020\003\022\035\n\031PHYSICAL_MEMORY_ISOLATI" + - "ON\020\004\022\036\n\032PHYSICAL_NETWORK_ISOLATION\020\005\022\036\n\032" + - "VIRTUAL_RESOURCE_ISOLATION\020\006\022\037\n\033NETWORK_" + - "FUNCTIONS_ISOLATION\020\007\022\025\n\021SERVICE_ISOLATI" + - "ON\020\0102\331\023\n\016ContextService\022:\n\016ListContextId" + - "s\022\016.context.Empty\032\026.context.ContextIdLis" + - "t\"\000\0226\n\014ListContexts\022\016.context.Empty\032\024.co" + - "ntext.ContextList\"\000\0224\n\nGetContext\022\022.cont" + - "ext.ContextId\032\020.context.Context\"\000\0224\n\nSet" + - "Context\022\020.context.Context\032\022.context.Cont" + - "extId\"\000\0225\n\rRemoveContext\022\022.context.Conte" + - "xtId\032\016.context.Empty\"\000\022=\n\020GetContextEven" + - "ts\022\016.context.Empty\032\025.context.ContextEven" + - "t\"\0000\001\022@\n\017ListTopologyIds\022\022.context.Conte" + - "xtId\032\027.context.TopologyIdList\"\000\022=\n\016ListT" + - "opologies\022\022.context.ContextId\032\025.context." + - "TopologyList\"\000\0227\n\013GetTopology\022\023.context." 
+ - "TopologyId\032\021.context.Topology\"\000\0227\n\013SetTo" + - "pology\022\021.context.Topology\032\023.context.Topo" + - "logyId\"\000\0227\n\016RemoveTopology\022\023.context.Top" + - "ologyId\032\016.context.Empty\"\000\022?\n\021GetTopology" + - "Events\022\016.context.Empty\032\026.context.Topolog" + - "yEvent\"\0000\001\0228\n\rListDeviceIds\022\016.context.Em" + - "pty\032\025.context.DeviceIdList\"\000\0224\n\013ListDevi" + - "ces\022\016.context.Empty\032\023.context.DeviceList" + - "\"\000\0221\n\tGetDevice\022\021.context.DeviceId\032\017.con" + - "text.Device\"\000\0221\n\tSetDevice\022\017.context.Dev" + - "ice\032\021.context.DeviceId\"\000\0223\n\014RemoveDevice" + - "\022\021.context.DeviceId\032\016.context.Empty\"\000\022;\n" + - "\017GetDeviceEvents\022\016.context.Empty\032\024.conte" + - "xt.DeviceEvent\"\0000\001\0224\n\013ListLinkIds\022\016.cont" + - "ext.Empty\032\023.context.LinkIdList\"\000\0220\n\tList" + - "Links\022\016.context.Empty\032\021.context.LinkList" + - "\"\000\022+\n\007GetLink\022\017.context.LinkId\032\r.context" + - ".Link\"\000\022+\n\007SetLink\022\r.context.Link\032\017.cont" + - "ext.LinkId\"\000\022/\n\nRemoveLink\022\017.context.Lin" + - "kId\032\016.context.Empty\"\000\0227\n\rGetLinkEvents\022\016" + - ".context.Empty\032\022.context.LinkEvent\"\0000\001\022>" + - "\n\016ListServiceIds\022\022.context.ContextId\032\026.c" + - "ontext.ServiceIdList\"\000\022:\n\014ListServices\022\022" + - ".context.ContextId\032\024.context.ServiceList" + - "\"\000\0224\n\nGetService\022\022.context.ServiceId\032\020.c" + - "ontext.Service\"\000\0224\n\nSetService\022\020.context" + - ".Service\032\022.context.ServiceId\"\000\0226\n\014UnsetS" + - "ervice\022\020.context.Service\032\022.context.Servi" + - "ceId\"\000\0225\n\rRemoveService\022\022.context.Servic" + - "eId\032\016.context.Empty\"\000\022=\n\020GetServiceEvent" + - "s\022\016.context.Empty\032\025.context.ServiceEvent" + - 
"\"\0000\001\022:\n\014ListSliceIds\022\022.context.ContextId" + - "\032\024.context.SliceIdList\"\000\0226\n\nListSlices\022\022" + - ".context.ContextId\032\022.context.SliceList\"\000" + - "\022.\n\010GetSlice\022\020.context.SliceId\032\016.context" + - ".Slice\"\000\022.\n\010SetSlice\022\016.context.Slice\032\020.c" + - "ontext.SliceId\"\000\0220\n\nUnsetSlice\022\016.context" + - ".Slice\032\020.context.SliceId\"\000\0221\n\013RemoveSlic" + - "e\022\020.context.SliceId\032\016.context.Empty\"\000\0229\n" + - "\016GetSliceEvents\022\016.context.Empty\032\023.contex" + - "t.SliceEvent\"\0000\001\022D\n\021ListConnectionIds\022\022." + - "context.ServiceId\032\031.context.ConnectionId" + - "List\"\000\022@\n\017ListConnections\022\022.context.Serv" + - "iceId\032\027.context.ConnectionList\"\000\022=\n\rGetC" + - "onnection\022\025.context.ConnectionId\032\023.conte" + - "xt.Connection\"\000\022=\n\rSetConnection\022\023.conte" + - "xt.Connection\032\025.context.ConnectionId\"\000\022;" + - "\n\020RemoveConnection\022\025.context.ConnectionI" + - "d\032\016.context.Empty\"\000\022C\n\023GetConnectionEven" + - "ts\022\016.context.Empty\032\030.context.ConnectionE" + - "vent\"\0000\001b\006proto3" + "\027DEVICEDRIVER_ONF_TR_352\020\005\022\023\n\017DEVICEDRIV" + + "ER_XR\020\006*\217\001\n\033DeviceOperationalStatusEnum\022" + + "%\n!DEVICEOPERATIONALSTATUS_UNDEFINED\020\000\022$" + + "\n DEVICEOPERATIONALSTATUS_DISABLED\020\001\022#\n\037" + + "DEVICEOPERATIONALSTATUS_ENABLED\020\002*\201\001\n\017Se" + + "rviceTypeEnum\022\027\n\023SERVICETYPE_UNKNOWN\020\000\022\024" + + "\n\020SERVICETYPE_L3NM\020\001\022\024\n\020SERVICETYPE_L2NM" + + "\020\002\022)\n%SERVICETYPE_TAPI_CONNECTIVITY_SERV" + + "ICE\020\003*\250\001\n\021ServiceStatusEnum\022\033\n\027SERVICEST" + + "ATUS_UNDEFINED\020\000\022\031\n\025SERVICESTATUS_PLANNE" + + "D\020\001\022\030\n\024SERVICESTATUS_ACTIVE\020\002\022!\n\035SERVICE" + + "STATUS_PENDING_REMOVAL\020\003\022\036\n\032SERVICESTATU" + 
+ "S_SLA_VIOLATED\020\004*\251\001\n\017SliceStatusEnum\022\031\n\025" + + "SLICESTATUS_UNDEFINED\020\000\022\027\n\023SLICESTATUS_P" + + "LANNED\020\001\022\024\n\020SLICESTATUS_INIT\020\002\022\026\n\022SLICES" + + "TATUS_ACTIVE\020\003\022\026\n\022SLICESTATUS_DEINIT\020\004\022\034" + + "\n\030SLICESTATUS_SLA_VIOLATED\020\005*]\n\020ConfigAc" + + "tionEnum\022\032\n\026CONFIGACTION_UNDEFINED\020\000\022\024\n\020" + + "CONFIGACTION_SET\020\001\022\027\n\023CONFIGACTION_DELET" + + "E\020\002*\203\002\n\022IsolationLevelEnum\022\020\n\014NO_ISOLATI" + + "ON\020\000\022\026\n\022PHYSICAL_ISOLATION\020\001\022\025\n\021LOGICAL_" + + "ISOLATION\020\002\022\025\n\021PROCESS_ISOLATION\020\003\022\035\n\031PH" + + "YSICAL_MEMORY_ISOLATION\020\004\022\036\n\032PHYSICAL_NE" + + "TWORK_ISOLATION\020\005\022\036\n\032VIRTUAL_RESOURCE_IS" + + "OLATION\020\006\022\037\n\033NETWORK_FUNCTIONS_ISOLATION" + + "\020\007\022\025\n\021SERVICE_ISOLATION\020\0102\331\023\n\016ContextSer" + + "vice\022:\n\016ListContextIds\022\016.context.Empty\032\026" + + ".context.ContextIdList\"\000\0226\n\014ListContexts" + + "\022\016.context.Empty\032\024.context.ContextList\"\000" + + "\0224\n\nGetContext\022\022.context.ContextId\032\020.con" + + "text.Context\"\000\0224\n\nSetContext\022\020.context.C" + + "ontext\032\022.context.ContextId\"\000\0225\n\rRemoveCo" + + "ntext\022\022.context.ContextId\032\016.context.Empt" + + "y\"\000\022=\n\020GetContextEvents\022\016.context.Empty\032" + + "\025.context.ContextEvent\"\0000\001\022@\n\017ListTopolo" + + "gyIds\022\022.context.ContextId\032\027.context.Topo" + + "logyIdList\"\000\022=\n\016ListTopologies\022\022.context" + + ".ContextId\032\025.context.TopologyList\"\000\0227\n\013G" + + "etTopology\022\023.context.TopologyId\032\021.contex" + + "t.Topology\"\000\0227\n\013SetTopology\022\021.context.To" + + "pology\032\023.context.TopologyId\"\000\0227\n\016RemoveT" + + "opology\022\023.context.TopologyId\032\016.context.E" + + 
"mpty\"\000\022?\n\021GetTopologyEvents\022\016.context.Em" + + "pty\032\026.context.TopologyEvent\"\0000\001\0228\n\rListD" + + "eviceIds\022\016.context.Empty\032\025.context.Devic" + + "eIdList\"\000\0224\n\013ListDevices\022\016.context.Empty" + + "\032\023.context.DeviceList\"\000\0221\n\tGetDevice\022\021.c" + + "ontext.DeviceId\032\017.context.Device\"\000\0221\n\tSe" + + "tDevice\022\017.context.Device\032\021.context.Devic" + + "eId\"\000\0223\n\014RemoveDevice\022\021.context.DeviceId" + + "\032\016.context.Empty\"\000\022;\n\017GetDeviceEvents\022\016." + + "context.Empty\032\024.context.DeviceEvent\"\0000\001\022" + + "4\n\013ListLinkIds\022\016.context.Empty\032\023.context" + + ".LinkIdList\"\000\0220\n\tListLinks\022\016.context.Emp" + + "ty\032\021.context.LinkList\"\000\022+\n\007GetLink\022\017.con" + + "text.LinkId\032\r.context.Link\"\000\022+\n\007SetLink\022" + + "\r.context.Link\032\017.context.LinkId\"\000\022/\n\nRem" + + "oveLink\022\017.context.LinkId\032\016.context.Empty" + + "\"\000\0227\n\rGetLinkEvents\022\016.context.Empty\032\022.co" + + "ntext.LinkEvent\"\0000\001\022>\n\016ListServiceIds\022\022." + + "context.ContextId\032\026.context.ServiceIdLis" + + "t\"\000\022:\n\014ListServices\022\022.context.ContextId\032" + + "\024.context.ServiceList\"\000\0224\n\nGetService\022\022." + + "context.ServiceId\032\020.context.Service\"\000\0224\n" + + "\nSetService\022\020.context.Service\032\022.context." 
+ + "ServiceId\"\000\0226\n\014UnsetService\022\020.context.Se" + + "rvice\032\022.context.ServiceId\"\000\0225\n\rRemoveSer" + + "vice\022\022.context.ServiceId\032\016.context.Empty" + + "\"\000\022=\n\020GetServiceEvents\022\016.context.Empty\032\025" + + ".context.ServiceEvent\"\0000\001\022:\n\014ListSliceId" + + "s\022\022.context.ContextId\032\024.context.SliceIdL" + + "ist\"\000\0226\n\nListSlices\022\022.context.ContextId\032" + + "\022.context.SliceList\"\000\022.\n\010GetSlice\022\020.cont" + + "ext.SliceId\032\016.context.Slice\"\000\022.\n\010SetSlic" + + "e\022\016.context.Slice\032\020.context.SliceId\"\000\0220\n" + + "\nUnsetSlice\022\016.context.Slice\032\020.context.Sl" + + "iceId\"\000\0221\n\013RemoveSlice\022\020.context.SliceId" + + "\032\016.context.Empty\"\000\0229\n\016GetSliceEvents\022\016.c" + + "ontext.Empty\032\023.context.SliceEvent\"\0000\001\022D\n" + + "\021ListConnectionIds\022\022.context.ServiceId\032\031" + + ".context.ConnectionIdList\"\000\022@\n\017ListConne" + + "ctions\022\022.context.ServiceId\032\027.context.Con" + + "nectionList\"\000\022=\n\rGetConnection\022\025.context" + + ".ConnectionId\032\023.context.Connection\"\000\022=\n\r" + + "SetConnection\022\023.context.Connection\032\025.con" + + "text.ConnectionId\"\000\022;\n\020RemoveConnection\022" + + "\025.context.ConnectionId\032\016.context.Empty\"\000" + + "\022C\n\023GetConnectionEvents\022\016.context.Empty\032" + + "\030.context.ConnectionEvent\"\0000\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml index ba73f7234808e159ad9ac763674cfd89fce1e600..4e236db2fe0a520c0dba15b06142043494e1b289 100644 --- a/src/policy/target/kubernetes/kubernetes.yml +++ b/src/policy/target/kubernetes/kubernetes.yml @@ -1,3 +1,7 @@ +<<<<<<< HEAD +======= +--- +>>>>>>> 
7d8a70789d7e1f21d4dd4aa245ca9030ea496c52 apiVersion: v1 kind: Service metadata: @@ -85,4 +89,4 @@ spec: initialDelaySeconds: 2 periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 10 \ No newline at end of file + timeoutSeconds: 10 diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py index 98113ba30fb095a29a2142e592b7759d2634eab9..0f21812089e2af8271884ef7539f979ff0426a5a 100644 --- a/src/service/service/service_handler_api/FilterFields.py +++ b/src/service/service/service_handler_api/FilterFields.py @@ -33,6 +33,7 @@ DEVICE_DRIVER_VALUES = { DeviceDriverEnum.DEVICEDRIVER_P4, DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352, + DeviceDriverEnum.DEVICEDRIVER_XR } # Map allowed filter fields to allowed values per Filter field. If no restriction (free text) None is specified diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 89e717722d152ce978dca10a768119d9e9adaf1e..34689ca1136c68611a098115b5acf5b74a788372 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -18,6 +18,7 @@ from .l2nm_emulated.L2NMEmulatedServiceHandler import L2NMEmulatedServiceHandler from .l3nm_emulated.L3NMEmulatedServiceHandler import L3NMEmulatedServiceHandler from .l3nm_openconfig.L3NMOpenConfigServiceHandler import L3NMOpenConfigServiceHandler from .tapi_tapi.TapiServiceHandler import TapiServiceHandler +from .microwave.MicrowaveServiceHandler import MicrowaveServiceHandler SERVICE_HANDLERS = [ (L2NMEmulatedServiceHandler, [ @@ -41,7 +42,13 @@ SERVICE_HANDLERS = [ (TapiServiceHandler, [ { FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, - FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API, + FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API, 
DeviceDriverEnum.DEVICEDRIVER_XR], } ]), -] + (MicrowaveServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM, + FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, + } + ]), +] \ No newline at end of file diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py index 18a5aea29eb7c025372d00828feb127336e90102..f12c9ab984205b9057dd1507114e5bc17d8deaa6 100644 --- a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py +++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py @@ -60,12 +60,13 @@ def setup_config_rules( {'name': network_instance_name, 'type': 'L2VSI'}), json_config_rule_set( - '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index), + '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index), {'name': if_cirid_name, 'type': 'l2vlan', 'index': sub_interface_index, 'vlan_id': vlan_id}), json_config_rule_set( '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name), - {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}), + {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, + 'subinterface': sub_interface_index}), json_config_rule_set( '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id), @@ -107,10 +108,11 @@ def teardown_config_rules( json_config_rule_delete( '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name), - {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}), + {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, + 'subinterface': sub_interface_index}), json_config_rule_delete( - '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index), + 
'/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index), {'name': if_cirid_name, 'index': sub_interface_index}), json_config_rule_delete( diff --git a/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py b/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py new file mode 100644 index 0000000000000000000000000000000000000000..1ae08bbf6a7b0f6aeedbf9d571dfbc154e22dace --- /dev/null +++ b/src/service/service/service_handlers/microwave/MicrowaveServiceHandler.py @@ -0,0 +1,170 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import anytree, json, logging +from typing import Any, Dict, List, Optional, Tuple, Union +from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service +from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +LOGGER = logging.getLogger(__name__) + +def check_endpoint(endpoint : str, service_uuid : str) -> Tuple[str, str]: + endpoint_split = endpoint.split(':') + if len(endpoint_split) != 2: + raise Exception('Endpoint({:s}) is malformed for Service({:s})'.format(str(endpoint), str(service_uuid))) + return endpoint_split + +class MicrowaveServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service : Service, task_executor : TaskExecutor, **settings + ) -> None: + self.__service = service + self.__task_executor = task_executor # pylint: disable=unused-private-member + self.__resolver = anytree.Resolver(pathattr='name') + self.__config = TreeNode('.') + for config_rule in service.service_config.config_rules: + action = config_rule.action + if config_rule.WhichOneof('config_rule') != 'custom': continue + resource_key = config_rule.custom.resource_key + resource_value = config_rule.custom.resource_value + if action == ConfigActionEnum.CONFIGACTION_SET: + try: + resource_value = json.loads(resource_value) + except: # pylint: disable=bare-except + pass + set_subnode_value(self.__resolver, self.__config, resource_key, resource_value) + elif action == ConfigActionEnum.CONFIGACTION_DELETE: + delete_subnode(self.__resolver, self.__config, resource_key) + + def SetEndpoint( + self, 
endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + LOGGER.info('[SetEndpoint] endpoints={:s}'.format(str(endpoints))) + LOGGER.info('[SetEndpoint] connection_uuid={:s}'.format(str(connection_uuid))) + + service_uuid = self.__service.service_id.service_uuid.uuid + + results = [] + try: + chk_type('endpoints', endpoints, list) + if len(endpoints) != 2: raise Exception('len(endpoints) != 2') + + settings : TreeNode = get_subnode(self.__resolver, self.__config, '/settings', None) + if settings is None: + raise Exception('Unable to retrieve settings for Service({:s})'.format(str(service_uuid))) + + json_settings : Dict = settings.value + vlan_id = json_settings.get('vlan_id', 121) + # endpoints are retrieved in the following format --> '/endpoints/endpoint[172.26.60.243:9]' + node_id_src, tp_id_src = check_endpoint(endpoints[0][1], service_uuid) + node_id_dst, tp_id_dst = check_endpoint(endpoints[1][1], service_uuid) + + device_uuid = endpoints[0][0] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + json_config_rule = json_config_rule_set('/service[{:s}]'.format(service_uuid), { + 'uuid' : service_uuid, + 'node_id_src': node_id_src, + 'tp_id_src' : tp_id_src, + 'node_id_dst': node_id_dst, + 'tp_id_dst' : tp_id_dst, + 'vlan_id' : vlan_id, + }) + del device.device_config.config_rules[:] + device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to SetEndpoint for Service({:s})'.format(str(service_uuid))) + results.append(e) + + return results + + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + LOGGER.info('[DeleteEndpoint] endpoints={:s}'.format(str(endpoints))) + 
LOGGER.info('[DeleteEndpoint] connection_uuid={:s}'.format(str(connection_uuid))) + + service_uuid = self.__service.service_id.service_uuid.uuid + + results = [] + try: + chk_type('endpoints', endpoints, list) + if len(endpoints) != 2: raise Exception('len(endpoints) != 2') + + device_uuid = endpoints[0][0] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + json_config_rule = json_config_rule_delete('/service[{:s}]'.format(service_uuid), {'uuid': service_uuid}) + del device.device_config.config_rules[:] + device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to DeleteEndpoint for Service({:s})'.format(str(service_uuid))) + results.append(e) + + return results + + def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' 
+ LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + results = [] + for resource in resources: + try: + resource_key, resource_value = resource + resource_value = json.loads(resource_value) + set_subnode_value(self.__resolver, self.__config, resource_key, resource_value) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource))) + results.append(e) + + return results + + def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + results = [] + for resource in resources: + try: + resource_key, _ = resource + delete_subnode(self.__resolver, self.__config, resource_key) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource))) + results.append(e) + + return results diff --git a/src/service/service/service_handlers/microwave/__init__.py b/src/service/service/service_handlers/microwave/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/service/service/service_handlers/microwave/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/slice/Dockerfile b/src/slice/Dockerfile index 96a751d156edcaef38794ecfe5b409cbeb081e82..7dadc477f70667c827d4a9eb0ddd013c85b97344 100644 --- a/src/slice/Dockerfile +++ b/src/slice/Dockerfile @@ -64,6 +64,7 @@ RUN python3 -m pip install -r requirements.txt WORKDIR /var/teraflow COPY src/context/. context/ COPY src/interdomain/. interdomain/ +COPY src/pathcomp/. pathcomp/ COPY src/service/. service/ COPY src/slice/. slice/ diff --git a/src/common/database/api/context/slice/SliceStatus.py b/src/slice/old_code/SliceStatus.py similarity index 100% rename from src/common/database/api/context/slice/SliceStatus.py rename to src/slice/old_code/SliceStatus.py diff --git a/src/slice/old_code/Tools.py b/src/slice/old_code/Tools.py index 4ea7900489f27588399e2eb94b6a5576d8b08fd0..08323f935195d8a0221b3f8889c0e6beeef94cb2 100644 --- a/src/slice/old_code/Tools.py +++ b/src/slice/old_code/Tools.py @@ -18,7 +18,7 @@ from common.Checkers import chk_options, chk_string from common.database.api.Database import Database from common.database.api.context.Constants import DEFAULT_CONTEXT_ID, DEFAULT_TOPOLOGY_ID from common.database.api.context.service.Service import Service -from common.database.api.context.slice.SliceStatus import SliceStatus, slicestatus_enum_values, to_slicestatus_enum +from slice.old_code.SliceStatus import SliceStatus, slicestatus_enum_values, to_slicestatus_enum from common.database.api.context.topology.device.Endpoint import Endpoint from common.exceptions.ServiceException import ServiceException from common.proto.slice_pb2 import 
TransportSlice diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index 53875f0e6ae7c8e3e7d5ac9dad7501a2136844c4..ada7218588391766147a02f9713b540016522aa7 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -17,11 +17,12 @@ from common.proto.context_pb2 import ( Empty, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Slice, SliceId, SliceStatusEnum) from common.proto.slice_pb2_grpc import SliceServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.tools.context_queries.InterDomain import is_multi_domain from common.tools.grpc.ConfigRules import copy_config_rules from common.tools.grpc.Constraints import copy_constraints from common.tools.grpc.EndPointIds import copy_endpoint_ids from common.tools.grpc.ServiceIds import update_service_ids -from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string +from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from interdomain.client.InterdomainClient import InterdomainClient from service.client.ServiceClient import ServiceClient @@ -42,103 +43,118 @@ class SliceServiceServicerImpl(SliceServiceServicer): try: _slice = context_client.GetSlice(request.slice_id) #json_current_slice = grpc_message_to_json(_slice) - except: + except: # pylint: disable=bare-except #json_current_slice = {} slice_request = Slice() - slice_request.slice_id.CopyFrom(request.slice_id) - slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED + slice_request.slice_id.CopyFrom(request.slice_id) # pylint: disable=no-member + slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED # pylint: disable=no-member context_client.SetSlice(slice_request) _slice = context_client.GetSlice(request.slice_id) + slice_request = Slice() 
slice_request.CopyFrom(_slice) + if len(request.slice_endpoint_ids) < 2: + # unable to identify the kind of slice; just update endpoints, constraints and config rules + # update the slice in database, and return + # pylint: disable=no-member + copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids) + copy_constraints(request.slice_constraints, slice_request.slice_constraints) + copy_config_rules(request.slice_config.config_rules, slice_request.slice_config.config_rules) + return context_client.SetSlice(slice_request) + #LOGGER.info('json_current_slice = {:s}'.format(str(json_current_slice))) #json_updated_slice = grpc_message_to_json(request) #LOGGER.info('json_updated_slice = {:s}'.format(str(json_updated_slice))) #changes = deepdiff.DeepDiff(json_current_slice, json_updated_slice) #LOGGER.info('changes = {:s}'.format(str(changes))) - domains = set() - for slice_endpoint_id in request.slice_endpoint_ids: - device_uuid = slice_endpoint_id.device_id.device_uuid.uuid - device_parts = device_uuid.split('@') - domain_uuid = '' if len(device_parts) == 1 else device_parts[1] - domains.add(domain_uuid) - LOGGER.info('domains = {:s}'.format(str(domains))) - is_multi_domain = len(domains) > 1 - LOGGER.info('is_multi_domain = {:s}'.format(str(is_multi_domain))) - - if is_multi_domain: + if is_multi_domain(context_client, request.slice_endpoint_ids): interdomain_client = InterdomainClient() slice_id = interdomain_client.RequestSlice(request) - else: - service_id = ServiceId() - context_uuid = service_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid - slice_uuid = service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid - - service_client = ServiceClient() - try: - _service = context_client.GetService(service_id) - except: - service_request = Service() - service_request.service_id.CopyFrom(service_id) - service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN - 
service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED - service_reply = service_client.CreateService(service_request) - if service_reply != service_request.service_id: # pylint: disable=no-member - raise Exception('Service creation failed. Wrong Service Id was returned') - _service = context_client.GetService(service_id) + slice_ = context_client.GetSlice(slice_id) + slice_active = Slice() + slice_active.CopyFrom(slice_) + slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member + context_client.SetSlice(slice_active) + return slice_id + + # Local domain slice + service_id = ServiceId() + # pylint: disable=no-member + context_uuid = service_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid + slice_uuid = service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid + + service_client = ServiceClient() + try: + _service = context_client.GetService(service_id) + except: # pylint: disable=bare-except + # pylint: disable=no-member service_request = Service() - service_request.CopyFrom(_service) - - copy_endpoint_ids(request.slice_endpoint_ids, service_request.service_endpoint_ids) - copy_constraints(request.slice_constraints, service_request.service_constraints) - copy_config_rules(request.slice_config.config_rules, service_request.service_config.config_rules) - + service_request.service_id.CopyFrom(service_id) service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN - for config_rule in request.slice_config.config_rules: - LOGGER.info('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule))) - config_rule_kind = config_rule.WhichOneof('config_rule') - LOGGER.info('config_rule_kind: {:s}'.format(str(config_rule_kind))) - if config_rule_kind != 'custom': continue - custom = config_rule.custom - resource_key = custom.resource_key - LOGGER.info('resource_key: {:s}'.format(str(resource_key))) - - # TODO: parse resource key 
with regular expression, e.g.: - # m = re.match('\/device\[[^\]]\]\/endpoint\[[^\]]\]\/settings', s) - if not resource_key.startswith('/device'): continue - if not resource_key.endswith('/settings'): continue - - resource_value = json.loads(custom.resource_value) - LOGGER.info('resource_value: {:s}'.format(str(resource_value))) - - if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN: - if (resource_value.get('address_ip') is not None and \ - resource_value.get('address_prefix') is not None): - service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM - LOGGER.info('is L3') - else: - service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM - LOGGER.info('is L2') - break - - service_reply = service_client.UpdateService(service_request) + service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED + service_reply = service_client.CreateService(service_request) if service_reply != service_request.service_id: # pylint: disable=no-member - raise Exception('Service update failed. Wrong Service Id was returned') - - copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids) - copy_constraints(request.slice_constraints, slice_request.slice_constraints) - copy_config_rules(request.slice_config.config_rules, slice_request.slice_config.config_rules) - - update_service_ids(slice_request.slice_service_ids, context_uuid, service_uuid) - context_client.SetSlice(slice_request) - slice_id = slice_request.slice_id + # pylint: disable=raise-missing-from + raise Exception('Service creation failed. 
Wrong Service Id was returned') + _service = context_client.GetService(service_id) + service_request = Service() + service_request.CopyFrom(_service) + + # pylint: disable=no-member + copy_endpoint_ids(request.slice_endpoint_ids, service_request.service_endpoint_ids) + copy_constraints(request.slice_constraints, service_request.service_constraints) + copy_config_rules(request.slice_config.config_rules, service_request.service_config.config_rules) + + service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN + for config_rule in request.slice_config.config_rules: + LOGGER.info('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule))) + config_rule_kind = config_rule.WhichOneof('config_rule') + LOGGER.info('config_rule_kind: {:s}'.format(str(config_rule_kind))) + if config_rule_kind != 'custom': continue + custom = config_rule.custom + resource_key = custom.resource_key + LOGGER.info('resource_key: {:s}'.format(str(resource_key))) + + # TODO: parse resource key with regular expression, e.g.: + # m = re.match('\/device\[[^\]]\]\/endpoint\[[^\]]\]\/settings', s) + if not resource_key.startswith('/device'): continue + if not resource_key.endswith('/settings'): continue + + resource_value = json.loads(custom.resource_value) + LOGGER.info('resource_value: {:s}'.format(str(resource_value))) + + if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN: + if (resource_value.get('address_ip') is not None and \ + resource_value.get('address_prefix') is not None): + service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM + LOGGER.info('is L3') + else: + service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM + LOGGER.info('is L2') + break + + if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN: + service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM + LOGGER.info('assume L2') + + service_reply = service_client.UpdateService(service_request) + if service_reply != service_request.service_id: # pylint: 
disable=no-member + raise Exception('Service update failed. Wrong Service Id was returned') + + copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids) + copy_constraints(request.slice_constraints, slice_request.slice_constraints) + copy_config_rules(request.slice_config.config_rules, slice_request.slice_config.config_rules) + + update_service_ids(slice_request.slice_service_ids, context_uuid, service_uuid) + context_client.SetSlice(slice_request) + slice_id = slice_request.slice_id slice_ = context_client.GetSlice(slice_id) slice_active = Slice() slice_active.CopyFrom(slice_) - slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE + slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member context_client.SetSlice(slice_active) return slice_id @@ -175,21 +191,11 @@ class SliceServiceServicerImpl(SliceServiceServicer): context_client = ContextClient() try: _slice = context_client.GetSlice(request) - except: + except: # pylint: disable=bare-except return Empty() - domains = set() - for slice_endpoint_id in _slice.slice_endpoint_ids: - device_uuid = slice_endpoint_id.device_id.device_uuid.uuid - device_parts = device_uuid.split('@') - domain_uuid = '' if len(device_parts) == 1 else device_parts[1] - domains.add(domain_uuid) - LOGGER.info('domains = {:s}'.format(str(domains))) - is_multi_domain = len(domains) > 1 - LOGGER.info('is_multi_domain = {:s}'.format(str(is_multi_domain))) - - if is_multi_domain: - interdomain_client = InterdomainClient() + if is_multi_domain(context_client, _slice.slice_endpoint_ids): + #interdomain_client = InterdomainClient() #slice_id = interdomain_client.DeleteSlice(request) raise NotImplementedError('Delete inter-domain slice') else: diff --git a/src/slice/service/__main__.py b/src/slice/service/__main__.py index a59c54b4b1b56865871d331409c1a7f60629aec6..b2f4536503ac176628c42cf0211315089697c50e 100644 --- a/src/slice/service/__main__.py +++ 
b/src/slice/service/__main__.py @@ -15,7 +15,9 @@ import logging, signal, sys, threading from prometheus_client import start_http_server from common.Constants import ServiceNameEnum -from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, wait_for_environment_variables +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, + wait_for_environment_variables) from .SliceService import SliceService terminate = threading.Event() diff --git a/src/tests/Fixtures.py b/src/tests/Fixtures.py index aeead8448651b386f4c69d12c139b6043fe5ef55..25b73e1de143b8c60d9a726ddf2bd3cea97d17a5 100644 --- a/src/tests/Fixtures.py +++ b/src/tests/Fixtures.py @@ -13,8 +13,6 @@ # limitations under the License. import pytest -from common.Settings import get_setting -from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from monitoring.client.MonitoringClient import MonitoringClient diff --git a/src/tests/ecoc22/run_tests_and_coverage.sh b/src/tests/ecoc22/run_tests_and_coverage.sh index 835867896020f2b94e0797bdf60c85af2228eda2..4517cc1ea7eec7027219517720c99bfea3b4250b 100755 --- a/src/tests/ecoc22/run_tests_and_coverage.sh +++ b/src/tests/ecoc22/run_tests_and_coverage.sh @@ -16,7 +16,6 @@ PROJECTDIR=`pwd` -cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc COVERAGEFILE=$PROJECTDIR/coverage/.coverage @@ -26,18 +25,20 @@ cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PRO # Destroy old coverage file rm -f $COVERAGEFILE +source tfs_runtime_env_vars.sh + # Force a flush of Context database kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL # Run functional tests and analyze code coverage at the same time coverage run 
--rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_bootstrap.py + src/tests/ecoc22/tests/test_functional_bootstrap.py coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_create_service.py + src/tests/ecoc22/tests/test_functional_create_service.py coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_delete_service.py + src/tests/ecoc22/tests/test_functional_delete_service.py coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_cleanup.py + src/tests/ecoc22/tests/test_functional_cleanup.py diff --git a/src/tests/nfvsdn22 b/src/tests/nfvsdn22 new file mode 120000 index 0000000000000000000000000000000000000000..e8122da56327bf631c751cbe38ce6b37d3dc7378 --- /dev/null +++ b/src/tests/nfvsdn22 @@ -0,0 +1 @@ +./scenario2 \ No newline at end of file diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh index 8afd683843d4882e75c3cbca8363aa3d63edda7f..ffd91da35186fe21f418950493ef797a9af1b522 100644 --- a/src/tests/ofc22/deploy_specs.sh +++ b/src/tests/ofc22/deploy_specs.sh @@ -2,6 +2,11 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. +# Supported components are: +# context device automation policy service compute monitoring webui +# interdomain slice pathcomp dlt +# dbscanserving opticalattackmitigator opticalattackdetector +# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" # Set the tag you want to use for your images. 
@@ -13,5 +18,9 @@ export TFS_K8S_NAMESPACE="tfs" # Set additional manifest files to be applied after the deployment export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" -# Set the neew Grafana admin password +# Set the new Grafana admin password export TFS_GRAFANA_PASSWORD="admin123+" + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} diff --git a/src/tests/ofc22/descriptors_emulated.json b/src/tests/ofc22/descriptors_emulated.json index 83f9c39e2ac7154b088ccdd0a1519ea32c1aee1d..a71d454f41f324cabb48a023d6d840a59245800c 100644 --- a/src/tests/ofc22/descriptors_emulated.json +++ b/src/tests/ofc22/descriptors_emulated.json @@ -9,70 +9,83 @@ "topologies": [ { "topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}, - "device_ids": [], - "link_ids": [] + "device_ids": [ + {"device_uuid": {"uuid": "R1-EMU"}}, + {"device_uuid": {"uuid": "R2-EMU"}}, + {"device_uuid": {"uuid": "R3-EMU"}}, + {"device_uuid": {"uuid": "R4-EMU"}}, + {"device_uuid": {"uuid": "O1-OLS"}} + ], + "link_ids": [ + {"link_uuid": {"uuid": "R1-EMU/13/0/0==O1-OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}}, + {"link_uuid": {"uuid": "R2-EMU/13/0/0==O1-OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}, + {"link_uuid": {"uuid": "R3-EMU/13/0/0==O1-OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}, + {"link_uuid": {"uuid": "R4-EMU/13/0/0==O1-OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}} + ] } ], "devices": [ { - "device_id": {"device_uuid": {"uuid": "R1-EMU"}}, - "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": 
{"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} - ]}, - "device_operational_status": 1, - "device_drivers": [0], - "device_endpoints": [] + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} }, { - "device_id": {"device_uuid": {"uuid": "R2-EMU"}}, - "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} - ]}, - "device_operational_status": 1, - "device_drivers": [0], - "device_endpoints": [] + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} }, { - "device_id": {"device_uuid": {"uuid": "R3-EMU"}}, - "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], 
"device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} - ]}, - "device_operational_status": 1, - "device_drivers": [0], - "device_endpoints": [] + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} }, { - "device_id": {"device_uuid": {"uuid": "R4-EMU"}}, - "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} - ]}, - "device_operational_status": 1, - "device_drivers": [0], - "device_endpoints": [] + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} }, { - "device_id": {"device_uuid": {"uuid": "O1-OLS"}}, - "device_type": "emu-open-line-system", + "device_id": 
{"device_uuid": {"uuid": "O1-OLS"}}, "device_type": "emu-open-line-system", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"aade6001-f00b-5e2f-a357-6a0a9d3de870\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"eb287d83-f05e-53ec-ab5a-adf6bd2b5418\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"0ef74f99-1acc-57bd-ab9d-4b958b06c513\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"50296d99-58cc-5ce7-82f5-fc8ee4eec2ec\"}]}"}} - ]}, - "device_operational_status": 1, - "device_drivers": [0], - "device_endpoints": [] + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870", "type": "optical", "sample_types": []}, + {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418", "type": "optical", "sample_types": []}, + {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513", "type": "optical", "sample_types": []}, + {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec", "type": "optical", "sample_types": []} + ]}}} + ]} } ], "links": [ diff --git a/src/tests/ofc22/descriptors_emulated_xr.json b/src/tests/ofc22/descriptors_emulated_xr.json new file mode 100644 index 0000000000000000000000000000000000000000..30bd97dddeb94f836d3fe66e51fce729c34ceced --- /dev/null +++ b/src/tests/ofc22/descriptors_emulated_xr.json @@ -0,0 +1,108 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [], + "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": 
"admin"}}}, + "device_ids": [], + "link_ids": [] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "R1-EMU"}}, + "device_type": "emu-packet-router", + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} + ]}, + "device_operational_status": 1, + "device_drivers": [0], + "device_endpoints": [] + }, + { + "device_id": {"device_uuid": {"uuid": "R2-EMU"}}, + "device_type": "emu-packet-router", + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} + ]}, + "device_operational_status": 1, + "device_drivers": [0], + "device_endpoints": [] + }, + { + "device_id": {"device_uuid": {"uuid": "R3-EMU"}}, + "device_type": "emu-packet-router", + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} + ]}, + 
"device_operational_status": 1, + "device_drivers": [0], + "device_endpoints": [] + }, + { + "device_id": {"device_uuid": {"uuid": "R4-EMU"}}, + "device_type": "emu-packet-router", + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} + ]}, + "device_operational_status": 1, + "device_drivers": [0], + "device_endpoints": [] + }, + { + "device_id": {"device_uuid": {"uuid": "X1-XR-CONSTELLATION"}}, + "device_type": "xr-constellation", + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "172.19.219.44"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "443"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"username\": \"xr-user-1\", \"password\": \"xr-user-1\", \"hub_module_name\": \"XR HUB 1\"}"}} + ]}, + "device_operational_status": 1, + "device_drivers": [6], + "device_endpoints": [] + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R1-EMU/13/0/0==XR HUB 1|XR-T4"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}, + {"device_id": {"device_uuid": {"uuid": "X1-XR-CONSTELLATION"}}, "endpoint_uuid": {"uuid": "XR HUB 1|XR-T4"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2-EMU/13/0/0==XR HUB 1|XR-T3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}, + {"device_id": {"device_uuid": {"uuid": "X1-XR-CONSTELLATION"}}, "endpoint_uuid": {"uuid": "XR HUB 1|XR-T3"}} + ] + }, 
+ { + "link_id": {"link_uuid": {"uuid": "R3-EMU/13/0/0==XR1-XR LEAF 1|XR-T1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}, + {"device_id": {"device_uuid": {"uuid": "X1-XR-CONSTELLATION"}}, "endpoint_uuid": {"uuid": "XR LEAF 1|XR-T1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4-EMU/13/0/0==XR LEAF 2|XR-T1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}, + {"device_id": {"device_uuid": {"uuid": "X1-XR-CONSTELLATION"}}, "endpoint_uuid": {"uuid": "XR LEAF 2|XR-T1"}} + ] + } + ] +} diff --git a/src/tests/ofc22/run_test_01_bootstrap.sh b/src/tests/ofc22/run_test_01_bootstrap.sh index bb740707321b24fc960299f2eac91cc2d9775b64..61b49b251f927ffb2e845f0c9094d30ea597abc6 100755 --- a/src/tests/ofc22/run_test_01_bootstrap.sh +++ b/src/tests/ofc22/run_test_01_bootstrap.sh @@ -13,9 +13,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -# make sure to source the following scripts: -# - my_deploy.sh -# - tfs_runtime_env_vars.sh - source tfs_runtime_env_vars.sh -pytest --verbose src/tests/ofc22/tests/test_functional_bootstrap.py +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_bootstrap.py diff --git a/src/tests/ofc22/run_test_02_create_service.sh b/src/tests/ofc22/run_test_02_create_service.sh index 8b6c8658df759bdcb777f83c6c7846d0ea7b48ed..135a3f74fe93d0d7a4da6ef0e02371a040fc1eb3 100755 --- a/src/tests/ofc22/run_test_02_create_service.sh +++ b/src/tests/ofc22/run_test_02_create_service.sh @@ -14,4 +14,4 @@ # limitations under the License. 
source tfs_runtime_env_vars.sh -pytest --verbose src/tests/ofc22/tests/test_functional_create_service.py +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_create_service.py diff --git a/src/tests/ofc22/run_test_03_delete_service.sh b/src/tests/ofc22/run_test_03_delete_service.sh index 51df41aee216e141b0d2e2f55a0398ecd9cdf35f..cbe6714fe91cf1758f62e697e667568d35578181 100755 --- a/src/tests/ofc22/run_test_03_delete_service.sh +++ b/src/tests/ofc22/run_test_03_delete_service.sh @@ -14,4 +14,4 @@ # limitations under the License. source tfs_runtime_env_vars.sh -pytest --verbose src/tests/ofc22/tests/test_functional_delete_service.py +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_delete_service.py diff --git a/src/tests/ofc22/run_test_04_cleanup.sh b/src/tests/ofc22/run_test_04_cleanup.sh index 2ba91684f9eb49075dd68877e54976f989811ae9..e88ddbd3227b3f29dfc7f126d5853e0b1d0e06f1 100755 --- a/src/tests/ofc22/run_test_04_cleanup.sh +++ b/src/tests/ofc22/run_test_04_cleanup.sh @@ -14,4 +14,4 @@ # limitations under the License. 
source tfs_runtime_env_vars.sh -pytest --verbose src/tests/ofc22/tests/test_functional_cleanup.py +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_cleanup.py diff --git a/src/tests/ofc22/run_tests_and_coverage.sh b/src/tests/ofc22/run_tests.sh similarity index 62% rename from src/tests/ofc22/run_tests_and_coverage.sh rename to src/tests/ofc22/run_tests.sh index bafc920c71a640d083497e1cd6ae025d0ea7cef5..0ad4be313987b8b5069808873f94840521d4284e 100755 --- a/src/tests/ofc22/run_tests_and_coverage.sh +++ b/src/tests/ofc22/run_tests.sh @@ -16,30 +16,29 @@ PROJECTDIR=`pwd` -cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc COVERAGEFILE=$PROJECTDIR/coverage/.coverage # Configure the correct folder on the .coveragerc file -cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR+g > $RCFILE +cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR/src+g > $RCFILE # Destroy old coverage file rm -f $COVERAGEFILE +source tfs_runtime_env_vars.sh + # Force a flush of Context database kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL -source tfs_runtime_env_vars.sh - -# Run functional tests and analyze code coverage at the same time -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ofc22/tests/test_functional_bootstrap.py +# Run functional tests +pytest --log-level=INFO --verbose \ + src/tests/ofc22/tests/test_functional_bootstrap.py -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ofc22/tests/test_functional_create_service.py +pytest --log-level=INFO --verbose \ + src/tests/ofc22/tests/test_functional_create_service.py -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ofc22/tests/test_functional_delete_service.py +pytest --log-level=INFO --verbose \ + 
src/tests/ofc22/tests/test_functional_delete_service.py -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ofc22/tests/test_functional_cleanup.py +pytest --log-level=INFO --verbose \ + src/tests/ofc22/tests/test_functional_cleanup.py diff --git a/src/tests/ofc22/tests/Fixtures.py b/src/tests/ofc22/tests/Fixtures.py index 370731e5de14b2c7c4acdcfa86eacfa66f2ffd4b..3b35a12e299ba776e909fbdd2739e971431083a6 100644 --- a/src/tests/ofc22/tests/Fixtures.py +++ b/src/tests/ofc22/tests/Fixtures.py @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest +import pytest, logging from common.Settings import get_setting -from compute.tests.mock_osm.MockOSM import MockOSM -from .Objects import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME +from tests.tools.mock_osm.Constants import WIM_PASSWORD, WIM_USERNAME +from tests.tools.mock_osm.MockOSM import MockOSM +from .Objects import WIM_MAPPING +LOGGER = logging.getLogger(__name__) @pytest.fixture(scope='session') def osm_wim(): wim_url = 'http://{:s}:{:s}'.format( get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) + LOGGER.info('WIM_MAPPING = {:s}'.format(str(WIM_MAPPING))) return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) diff --git a/src/tests/ofc22/tests/Objects.py b/src/tests/ofc22/tests/Objects.py index d2fb32ebb20b7bcdda9ac12b7a7390c46e6fb1d1..7bfbe9fce558d6a86d965ecb6421369d7f544d4d 100644 --- a/src/tests/ofc22/tests/Objects.py +++ b/src/tests/ofc22/tests/Objects.py @@ -12,220 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Dict, List, Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID -from common.tools.object_factory.Context import json_context, json_context_id -from common.tools.object_factory.Device import ( - json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, - json_device_emulated_tapi_disabled, json_device_id, json_device_packetrouter_disabled, json_device_tapi_disabled) -from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id -from common.tools.object_factory.Link import json_link, json_link_id -from common.tools.object_factory.Topology import json_topology, json_topology_id -from common.proto.kpi_sample_types_pb2 import KpiSampleType - -# ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) - -# ----- Topology ------------------------------------------------------------------------------------------------------- -TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) -TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) - -# ----- Monitoring Samples --------------------------------------------------------------------------------------------- -PACKET_PORT_SAMPLE_TYPES = [ - KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED, - KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED, - KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED, - KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED, -] - -# ----- Device Credentials and Settings -------------------------------------------------------------------------------- -try: - from .Credentials import DEVICE_R1_ADDRESS, DEVICE_R1_PORT, DEVICE_R1_USERNAME, DEVICE_R1_PASSWORD - from .Credentials import DEVICE_R3_ADDRESS, DEVICE_R3_PORT, DEVICE_R3_USERNAME, DEVICE_R3_PASSWORD - from .Credentials import DEVICE_O1_ADDRESS, 
DEVICE_O1_PORT - USE_REAL_DEVICES = True # Use real devices -except ImportError: - USE_REAL_DEVICES = False # Use emulated devices - - DEVICE_R1_ADDRESS = '0.0.0.0' - DEVICE_R1_PORT = 830 - DEVICE_R1_USERNAME = 'admin' - DEVICE_R1_PASSWORD = 'admin' - - DEVICE_R3_ADDRESS = '0.0.0.0' - DEVICE_R3_PORT = 830 - DEVICE_R3_USERNAME = 'admin' - DEVICE_R3_PASSWORD = 'admin' - - DEVICE_O1_ADDRESS = '0.0.0.0' - DEVICE_O1_PORT = 4900 - -#USE_REAL_DEVICES = False # Uncomment to force to use emulated devices - -def json_endpoint_ids(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]): - return [ - json_endpoint_id(device_id, ep_uuid, topology_id=None) - for ep_uuid, _, _ in endpoint_descriptors - ] - -def json_endpoints(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]): - return [ - json_endpoint(device_id, ep_uuid, ep_type, topology_id=None, kpi_sample_types=ep_sample_types) - for ep_uuid, ep_type, ep_sample_types in endpoint_descriptors - ] - -def get_link_uuid(a_device_id : Dict, a_endpoint_id : Dict, z_device_id : Dict, z_endpoint_id : Dict) -> str: - return '{:s}/{:s}=={:s}/{:s}'.format( - a_device_id['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'], - z_device_id['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid']) - - -# ----- Devices -------------------------------------------------------------------------------------------------------- -if not USE_REAL_DEVICES: - json_device_packetrouter_disabled = json_device_emulated_packet_router_disabled - json_device_tapi_disabled = json_device_emulated_tapi_disabled - -DEVICE_R1_UUID = 'R1-EMU' -DEVICE_R1_TIMEOUT = 120 -DEVICE_R1_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] -DEVICE_R1_ID = json_device_id(DEVICE_R1_UUID) -#DEVICE_R1_ENDPOINTS = json_endpoints(DEVICE_R1_ID, DEVICE_R1_ENDPOINT_DEFS) -DEVICE_R1_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R1_ID, DEVICE_R1_ENDPOINT_DEFS) -DEVICE_R1 = 
json_device_packetrouter_disabled(DEVICE_R1_UUID) -ENDPOINT_ID_R1_13_0_0 = DEVICE_R1_ENDPOINT_IDS[0] -ENDPOINT_ID_R1_13_1_2 = DEVICE_R1_ENDPOINT_IDS[1] -DEVICE_R1_CONNECT_RULES = json_device_connect_rules(DEVICE_R1_ADDRESS, DEVICE_R1_PORT, { - 'username': DEVICE_R1_USERNAME, - 'password': DEVICE_R1_PASSWORD, - 'timeout' : DEVICE_R1_TIMEOUT, -}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_R1_ENDPOINT_DEFS) - - -DEVICE_R2_UUID = 'R2-EMU' -DEVICE_R2_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] -DEVICE_R2_ID = json_device_id(DEVICE_R2_UUID) -#DEVICE_R2_ENDPOINTS = json_endpoints(DEVICE_R2_ID, DEVICE_R2_ENDPOINT_DEFS) -DEVICE_R2_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R2_ID, DEVICE_R2_ENDPOINT_DEFS) -DEVICE_R2 = json_device_emulated_packet_router_disabled(DEVICE_R2_UUID) -ENDPOINT_ID_R2_13_0_0 = DEVICE_R2_ENDPOINT_IDS[0] -ENDPOINT_ID_R2_13_1_2 = DEVICE_R2_ENDPOINT_IDS[1] -DEVICE_R2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R2_ENDPOINT_DEFS) - - -DEVICE_R3_UUID = 'R3-EMU' -DEVICE_R3_TIMEOUT = 120 -DEVICE_R3_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] -DEVICE_R3_ID = json_device_id(DEVICE_R3_UUID) -#DEVICE_R3_ENDPOINTS = json_endpoints(DEVICE_R3_ID, DEVICE_R3_ENDPOINT_DEFS) -DEVICE_R3_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R3_ID, DEVICE_R3_ENDPOINT_DEFS) -DEVICE_R3 = json_device_packetrouter_disabled(DEVICE_R3_UUID) -ENDPOINT_ID_R3_13_0_0 = DEVICE_R3_ENDPOINT_IDS[0] -ENDPOINT_ID_R3_13_1_2 = DEVICE_R3_ENDPOINT_IDS[1] -DEVICE_R3_CONNECT_RULES = json_device_connect_rules(DEVICE_R3_ADDRESS, DEVICE_R3_PORT, { - 'username': DEVICE_R3_USERNAME, - 'password': DEVICE_R3_PASSWORD, - 'timeout' : DEVICE_R3_TIMEOUT, -}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_R3_ENDPOINT_DEFS) - - -DEVICE_R4_UUID = 'R4-EMU' -DEVICE_R4_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] -DEVICE_R4_ID = 
json_device_id(DEVICE_R4_UUID) -#DEVICE_R4_ENDPOINTS = json_endpoints(DEVICE_R4_ID, DEVICE_R4_ENDPOINT_DEFS) -DEVICE_R4_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R4_ID, DEVICE_R4_ENDPOINT_DEFS) -DEVICE_R4 = json_device_emulated_packet_router_disabled(DEVICE_R4_UUID) -ENDPOINT_ID_R4_13_0_0 = DEVICE_R4_ENDPOINT_IDS[0] -ENDPOINT_ID_R4_13_1_2 = DEVICE_R4_ENDPOINT_IDS[1] -DEVICE_R4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R4_ENDPOINT_DEFS) - - -DEVICE_O1_UUID = 'O1-OLS' -DEVICE_O1_TIMEOUT = 120 -DEVICE_O1_ENDPOINT_DEFS = [ - ('aade6001-f00b-5e2f-a357-6a0a9d3de870', 'optical', []), # node_1_port_13 - ('eb287d83-f05e-53ec-ab5a-adf6bd2b5418', 'optical', []), # node_2_port_13 - ('0ef74f99-1acc-57bd-ab9d-4b958b06c513', 'optical', []), # node_3_port_13 - ('50296d99-58cc-5ce7-82f5-fc8ee4eec2ec', 'optical', []), # node_4_port_13 -] -DEVICE_O1_ID = json_device_id(DEVICE_O1_UUID) -DEVICE_O1 = json_device_tapi_disabled(DEVICE_O1_UUID) -#DEVICE_O1_ENDPOINTS = json_endpoints(DEVICE_O1_ID, DEVICE_O1_ENDPOINT_DEFS) -DEVICE_O1_ENDPOINT_IDS = json_endpoint_ids(DEVICE_O1_ID, DEVICE_O1_ENDPOINT_DEFS) -ENDPOINT_ID_O1_EP1 = DEVICE_O1_ENDPOINT_IDS[0] -ENDPOINT_ID_O1_EP2 = DEVICE_O1_ENDPOINT_IDS[1] -ENDPOINT_ID_O1_EP3 = DEVICE_O1_ENDPOINT_IDS[2] -ENDPOINT_ID_O1_EP4 = DEVICE_O1_ENDPOINT_IDS[3] -DEVICE_O1_CONNECT_RULES = json_device_connect_rules(DEVICE_O1_ADDRESS, DEVICE_O1_PORT, { - 'timeout' : DEVICE_O1_TIMEOUT, -}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_O1_ENDPOINT_DEFS) - - -# ----- Links ---------------------------------------------------------------------------------------------------------- -LINK_R1_O1_UUID = get_link_uuid(DEVICE_R1_ID, ENDPOINT_ID_R1_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP1) -LINK_R1_O1_ID = json_link_id(LINK_R1_O1_UUID) -LINK_R1_O1 = json_link(LINK_R1_O1_UUID, [ENDPOINT_ID_R1_13_0_0, ENDPOINT_ID_O1_EP1]) - -LINK_R2_O1_UUID = get_link_uuid(DEVICE_R2_ID, ENDPOINT_ID_R2_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP2) -LINK_R2_O1_ID = 
json_link_id(LINK_R2_O1_UUID) -LINK_R2_O1 = json_link(LINK_R2_O1_UUID, [ENDPOINT_ID_R2_13_0_0, ENDPOINT_ID_O1_EP2]) - -LINK_R3_O1_UUID = get_link_uuid(DEVICE_R3_ID, ENDPOINT_ID_R3_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP3) -LINK_R3_O1_ID = json_link_id(LINK_R3_O1_UUID) -LINK_R3_O1 = json_link(LINK_R3_O1_UUID, [ENDPOINT_ID_R3_13_0_0, ENDPOINT_ID_O1_EP3]) - -LINK_R4_O1_UUID = get_link_uuid(DEVICE_R4_ID, ENDPOINT_ID_R4_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP4) -LINK_R4_O1_ID = json_link_id(LINK_R4_O1_UUID) -LINK_R4_O1 = json_link(LINK_R4_O1_UUID, [ENDPOINT_ID_R4_13_0_0, ENDPOINT_ID_O1_EP4]) - +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.EndPoint import json_endpoint_id +from tests.tools.mock_osm.Tools import connection_point, wim_mapping # ----- WIM Service Settings ------------------------------------------------------------------------------------------- -def compose_service_endpoint_id(endpoint_id): - device_uuid = endpoint_id['device_id']['device_uuid']['uuid'] - endpoint_uuid = endpoint_id['endpoint_uuid']['uuid'] - return ':'.join([device_uuid, endpoint_uuid]) - -WIM_SEP_R1_ID = compose_service_endpoint_id(ENDPOINT_ID_R1_13_1_2) -WIM_SEP_R1_SITE_ID = '1' -WIM_SEP_R1_BEARER = WIM_SEP_R1_ID -WIM_SRV_R1_VLAN_ID = 400 +WIM_DC1_SITE_ID = '1' +WIM_DC1_DEVICE_ID = json_device_id('R1-EMU') +WIM_DC1_ENDPOINT_ID = json_endpoint_id(WIM_DC1_DEVICE_ID, '13/1/2') -WIM_SEP_R3_ID = compose_service_endpoint_id(ENDPOINT_ID_R3_13_1_2) -WIM_SEP_R3_SITE_ID = '2' -WIM_SEP_R3_BEARER = WIM_SEP_R3_ID -WIM_SRV_R3_VLAN_ID = 500 +WIM_DC2_SITE_ID = '2' +WIM_DC2_DEVICE_ID = json_device_id('R3-EMU') +WIM_DC2_ENDPOINT_ID = json_endpoint_id(WIM_DC2_DEVICE_ID, '13/1/2') -WIM_USERNAME = 'admin' -WIM_PASSWORD = 'admin' +WIM_SEP_DC1, WIM_MAP_DC1 = wim_mapping(WIM_DC1_SITE_ID, WIM_DC1_ENDPOINT_ID) +WIM_SEP_DC2, WIM_MAP_DC2 = wim_mapping(WIM_DC2_SITE_ID, WIM_DC2_ENDPOINT_ID) +WIM_MAPPING = [WIM_MAP_DC1, WIM_MAP_DC2] -WIM_MAPPING = [ - {'device-id': 
DEVICE_R1_UUID, 'service_endpoint_id': WIM_SEP_R1_ID, - 'service_mapping_info': {'bearer': {'bearer-reference': WIM_SEP_R1_BEARER}, 'site-id': WIM_SEP_R1_SITE_ID}}, - {'device-id': DEVICE_R3_UUID, 'service_endpoint_id': WIM_SEP_R3_ID, - 'service_mapping_info': {'bearer': {'bearer-reference': WIM_SEP_R3_BEARER}, 'site-id': WIM_SEP_R3_SITE_ID}}, -] +WIM_SRV_VLAN_ID = 300 WIM_SERVICE_TYPE = 'ELINE' WIM_SERVICE_CONNECTION_POINTS = [ - {'service_endpoint_id': WIM_SEP_R1_ID, - 'service_endpoint_encapsulation_type': 'dot1q', - 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_R1_VLAN_ID}}, - {'service_endpoint_id': WIM_SEP_R3_ID, - 'service_endpoint_encapsulation_type': 'dot1q', - 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_R3_VLAN_ID}}, + connection_point(WIM_SEP_DC1, 'dot1q', WIM_SRV_VLAN_ID), + connection_point(WIM_SEP_DC2, 'dot1q', WIM_SRV_VLAN_ID), ] - -# ----- Object Collections --------------------------------------------------------------------------------------------- - -CONTEXTS = [CONTEXT] -TOPOLOGIES = [TOPOLOGY] - -DEVICES = [ - (DEVICE_R1, DEVICE_R1_CONNECT_RULES), - (DEVICE_R2, DEVICE_R2_CONNECT_RULES), - (DEVICE_R3, DEVICE_R3_CONNECT_RULES), - (DEVICE_R4, DEVICE_R4_CONNECT_RULES), - (DEVICE_O1, DEVICE_O1_CONNECT_RULES), -] - -LINKS = [LINK_R1_O1, LINK_R2_O1, LINK_R3_O1, LINK_R4_O1] \ No newline at end of file diff --git a/src/tests/ofc22/tests/ObjectsXr.py b/src/tests/ofc22/tests/ObjectsXr.py new file mode 100644 index 0000000000000000000000000000000000000000..0cb223de2ede509443275496ba9ca57158335036 --- /dev/null +++ b/src/tests/ofc22/tests/ObjectsXr.py @@ -0,0 +1,238 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List, Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.tools.object_factory.Context import json_context, json_context_id +from common.tools.object_factory.Device import ( + json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, + json_device_emulated_tapi_disabled, json_device_id, json_device_packetrouter_disabled, json_device_tapi_disabled) +from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id +from common.tools.object_factory.Link import json_link, json_link_id +from common.tools.object_factory.Topology import json_topology, json_topology_id +from common.proto.kpi_sample_types_pb2 import KpiSampleType + +# ----- Context -------------------------------------------------------------------------------------------------------- +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) +CONTEXT = json_context(DEFAULT_CONTEXT_UUID) + +# ----- Topology ------------------------------------------------------------------------------------------------------- +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) + +# ----- Monitoring Samples --------------------------------------------------------------------------------------------- +PACKET_PORT_SAMPLE_TYPES = [ + KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED, + KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED, + KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED, + 
KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED, +] + +# ----- Device Credentials and Settings -------------------------------------------------------------------------------- +try: + from .Credentials import DEVICE_R1_ADDRESS, DEVICE_R1_PORT, DEVICE_R1_USERNAME, DEVICE_R1_PASSWORD + from .Credentials import DEVICE_R3_ADDRESS, DEVICE_R3_PORT, DEVICE_R3_USERNAME, DEVICE_R3_PASSWORD + #from .Credentials import DEVICE_O1_ADDRESS, DEVICE_O1_PORT + USE_REAL_DEVICES = True # Use real devices +except ImportError: + USE_REAL_DEVICES = False # Use emulated devices + + DEVICE_R1_ADDRESS = '0.0.0.0' + DEVICE_R1_PORT = 830 + DEVICE_R1_USERNAME = 'admin' + DEVICE_R1_PASSWORD = 'admin' + + DEVICE_R3_ADDRESS = '0.0.0.0' + DEVICE_R3_PORT = 830 + DEVICE_R3_USERNAME = 'admin' + DEVICE_R3_PASSWORD = 'admin' + +DEVICE_X1_ADDRESS = '172.19.219.44' +DEVICE_X1_PORT = 443 + +#USE_REAL_DEVICES = False # Uncomment to force to use emulated devices + +def json_endpoint_ids(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]): + return [ + json_endpoint_id(device_id, ep_uuid, topology_id=None) + for ep_uuid, _, _ in endpoint_descriptors + ] + +def json_endpoints(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]): + return [ + json_endpoint(device_id, ep_uuid, ep_type, topology_id=None, kpi_sample_types=ep_sample_types) + for ep_uuid, ep_type, ep_sample_types in endpoint_descriptors + ] + +def get_link_uuid(a_device_id : Dict, a_endpoint_id : Dict, z_device_id : Dict, z_endpoint_id : Dict) -> str: + return '{:s}/{:s}=={:s}/{:s}'.format( + a_device_id['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'], + z_device_id['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid']) + + +# ----- Devices -------------------------------------------------------------------------------------------------------- +if not USE_REAL_DEVICES: + json_device_packetrouter_disabled = json_device_emulated_packet_router_disabled + json_device_tapi_disabled = 
json_device_emulated_tapi_disabled + +DEVICE_R1_UUID = 'R1-EMU' +DEVICE_R1_TIMEOUT = 120 +DEVICE_R1_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] +DEVICE_R1_ID = json_device_id(DEVICE_R1_UUID) +#DEVICE_R1_ENDPOINTS = json_endpoints(DEVICE_R1_ID, DEVICE_R1_ENDPOINT_DEFS) +DEVICE_R1_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R1_ID, DEVICE_R1_ENDPOINT_DEFS) +DEVICE_R1 = json_device_packetrouter_disabled(DEVICE_R1_UUID) +ENDPOINT_ID_R1_13_0_0 = DEVICE_R1_ENDPOINT_IDS[0] +ENDPOINT_ID_R1_13_1_2 = DEVICE_R1_ENDPOINT_IDS[1] +DEVICE_R1_CONNECT_RULES = json_device_connect_rules(DEVICE_R1_ADDRESS, DEVICE_R1_PORT, { + 'username': DEVICE_R1_USERNAME, + 'password': DEVICE_R1_PASSWORD, + 'timeout' : DEVICE_R1_TIMEOUT, +}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_R1_ENDPOINT_DEFS) + + +DEVICE_R2_UUID = 'R2-EMU' +DEVICE_R2_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] +DEVICE_R2_ID = json_device_id(DEVICE_R2_UUID) +#DEVICE_R2_ENDPOINTS = json_endpoints(DEVICE_R2_ID, DEVICE_R2_ENDPOINT_DEFS) +DEVICE_R2_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R2_ID, DEVICE_R2_ENDPOINT_DEFS) +DEVICE_R2 = json_device_emulated_packet_router_disabled(DEVICE_R2_UUID) +ENDPOINT_ID_R2_13_0_0 = DEVICE_R2_ENDPOINT_IDS[0] +ENDPOINT_ID_R2_13_1_2 = DEVICE_R2_ENDPOINT_IDS[1] +DEVICE_R2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R2_ENDPOINT_DEFS) + + +DEVICE_R3_UUID = 'R3-EMU' +DEVICE_R3_TIMEOUT = 120 +DEVICE_R3_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] +DEVICE_R3_ID = json_device_id(DEVICE_R3_UUID) +#DEVICE_R3_ENDPOINTS = json_endpoints(DEVICE_R3_ID, DEVICE_R3_ENDPOINT_DEFS) +DEVICE_R3_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R3_ID, DEVICE_R3_ENDPOINT_DEFS) +DEVICE_R3 = json_device_packetrouter_disabled(DEVICE_R3_UUID) +ENDPOINT_ID_R3_13_0_0 = DEVICE_R3_ENDPOINT_IDS[0] +ENDPOINT_ID_R3_13_1_2 = DEVICE_R3_ENDPOINT_IDS[1] 
+DEVICE_R3_CONNECT_RULES = json_device_connect_rules(DEVICE_R3_ADDRESS, DEVICE_R3_PORT, { + 'username': DEVICE_R3_USERNAME, + 'password': DEVICE_R3_PASSWORD, + 'timeout' : DEVICE_R3_TIMEOUT, +}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_R3_ENDPOINT_DEFS) + + +DEVICE_R4_UUID = 'R4-EMU' +DEVICE_R4_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] +DEVICE_R4_ID = json_device_id(DEVICE_R4_UUID) +#DEVICE_R4_ENDPOINTS = json_endpoints(DEVICE_R4_ID, DEVICE_R4_ENDPOINT_DEFS) +DEVICE_R4_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R4_ID, DEVICE_R4_ENDPOINT_DEFS) +DEVICE_R4 = json_device_emulated_packet_router_disabled(DEVICE_R4_UUID) +ENDPOINT_ID_R4_13_0_0 = DEVICE_R4_ENDPOINT_IDS[0] +ENDPOINT_ID_R4_13_1_2 = DEVICE_R4_ENDPOINT_IDS[1] +DEVICE_R4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R4_ENDPOINT_DEFS) + + +DEVICE_X1_UUID = 'X1-XR-CONSTELLATION' +DEVICE_X1_TIMEOUT = 120 +DEVICE_X1_ENDPOINT_DEFS = [ + ('XR HUB 1|XR-T1', 'optical', []), + ('XR HUB 1|XR-T2', 'optical', []), + ('XR HUB 1|XR-T3', 'optical', []), + ('XR HUB 1|XR-T4', 'optical', []), + ('XR LEAF 1|XR-T1', 'optical', []), + ('XR LEAF 2|XR-T1', 'optical', []), +] +DEVICE_X1_ID = json_device_id(DEVICE_X1_UUID) +DEVICE_X1 = json_device_tapi_disabled(DEVICE_X1_UUID) +DEVICE_X1_ENDPOINT_IDS = json_endpoint_ids(DEVICE_X1_ID, DEVICE_X1_ENDPOINT_DEFS) +# These match JSON, hence indexes are what they are +ENDPOINT_ID_X1_EP1 = DEVICE_X1_ENDPOINT_IDS[3] +ENDPOINT_ID_X1_EP2 = DEVICE_X1_ENDPOINT_IDS[2] +ENDPOINT_ID_X1_EP3 = DEVICE_X1_ENDPOINT_IDS[4] +ENDPOINT_ID_X1_EP4 = DEVICE_X1_ENDPOINT_IDS[5] +DEVICE_X1_CONNECT_RULES = json_device_connect_rules(DEVICE_X1_ADDRESS, DEVICE_X1_PORT, { + 'timeout' : DEVICE_X1_TIMEOUT, + "username": "xr-user-1", + "password": "xr-user-1", + "hub_module_name": "XR HUB 1" +}) +# Always using real device (CM, whether CM has emulated backend is another story) +#if USE_REAL_DEVICES else
json_device_emulated_connect_rules(DEVICE_X1_ENDPOINT_DEFS) + + +# ----- Links ---------------------------------------------------------------------------------------------------------- +LINK_R1_X1_UUID = get_link_uuid(DEVICE_R1_ID, ENDPOINT_ID_R1_13_0_0, DEVICE_X1_ID, ENDPOINT_ID_X1_EP1) +LINK_R1_X1_ID = json_link_id(LINK_R1_X1_UUID) +LINK_R1_X1 = json_link(LINK_R1_X1_UUID, [ENDPOINT_ID_R1_13_0_0, ENDPOINT_ID_X1_EP1]) + +LINK_R2_X1_UUID = get_link_uuid(DEVICE_R2_ID, ENDPOINT_ID_R2_13_0_0, DEVICE_X1_ID, ENDPOINT_ID_X1_EP2) +LINK_R2_X1_ID = json_link_id(LINK_R2_X1_UUID) +LINK_R2_X1 = json_link(LINK_R2_X1_UUID, [ENDPOINT_ID_R2_13_0_0, ENDPOINT_ID_X1_EP2]) + +LINK_R3_X1_UUID = get_link_uuid(DEVICE_R3_ID, ENDPOINT_ID_R3_13_0_0, DEVICE_X1_ID, ENDPOINT_ID_X1_EP3) +LINK_R3_X1_ID = json_link_id(LINK_R3_X1_UUID) +LINK_R3_X1 = json_link(LINK_R3_X1_UUID, [ENDPOINT_ID_R3_13_0_0, ENDPOINT_ID_X1_EP3]) + +LINK_R4_X1_UUID = get_link_uuid(DEVICE_R4_ID, ENDPOINT_ID_R4_13_0_0, DEVICE_X1_ID, ENDPOINT_ID_X1_EP4) +LINK_R4_X1_ID = json_link_id(LINK_R4_X1_UUID) +LINK_R4_X1 = json_link(LINK_R4_X1_UUID, [ENDPOINT_ID_R4_13_0_0, ENDPOINT_ID_X1_EP4]) + + +# ----- WIM Service Settings ------------------------------------------------------------------------------------------- + +def compose_service_endpoint_id(endpoint_id): + device_uuid = endpoint_id['device_id']['device_uuid']['uuid'] + endpoint_uuid = endpoint_id['endpoint_uuid']['uuid'] + return ':'.join([device_uuid, endpoint_uuid]) + +WIM_SEP_R1_ID = compose_service_endpoint_id(ENDPOINT_ID_R1_13_1_2) +WIM_SEP_R1_SITE_ID = '1' +WIM_SEP_R1_BEARER = WIM_SEP_R1_ID +WIM_SRV_R1_VLAN_ID = 400 + +WIM_SEP_R3_ID = compose_service_endpoint_id(ENDPOINT_ID_R3_13_1_2) +WIM_SEP_R3_SITE_ID = '2' +WIM_SEP_R3_BEARER = WIM_SEP_R3_ID +WIM_SRV_R3_VLAN_ID = 500 + +WIM_USERNAME = 'admin' +WIM_PASSWORD = 'admin' + +WIM_MAPPING = [ + {'device-id': DEVICE_R1_UUID, 'service_endpoint_id': WIM_SEP_R1_ID, + 'service_mapping_info': {'bearer': {'bearer-reference': 
WIM_SEP_R1_BEARER}, 'site-id': WIM_SEP_R1_SITE_ID}}, + {'device-id': DEVICE_R3_UUID, 'service_endpoint_id': WIM_SEP_R3_ID, + 'service_mapping_info': {'bearer': {'bearer-reference': WIM_SEP_R3_BEARER}, 'site-id': WIM_SEP_R3_SITE_ID}}, +] +WIM_SERVICE_TYPE = 'ELINE' +WIM_SERVICE_CONNECTION_POINTS = [ + {'service_endpoint_id': WIM_SEP_R1_ID, + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_R1_VLAN_ID}}, + {'service_endpoint_id': WIM_SEP_R3_ID, + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_R3_VLAN_ID}}, +] + +# ----- Object Collections --------------------------------------------------------------------------------------------- + +CONTEXTS = [CONTEXT] +TOPOLOGIES = [TOPOLOGY] + +DEVICES = [ + (DEVICE_R1, DEVICE_R1_CONNECT_RULES), + (DEVICE_R2, DEVICE_R2_CONNECT_RULES), + (DEVICE_R3, DEVICE_R3_CONNECT_RULES), + (DEVICE_R4, DEVICE_R4_CONNECT_RULES), + (DEVICE_X1, DEVICE_X1_CONNECT_RULES), +] + +LINKS = [LINK_R1_X1, LINK_R2_X1, LINK_R3_X1, LINK_R4_X1] diff --git a/src/tests/ofc22/tests/test_functional_bootstrap.py b/src/tests/ofc22/tests/test_functional_bootstrap.py index 76c52810bb855a28f772dcc564e97e9f3ff1f92e..71deb9d596b1494e148b140902ca927e5d664dd3 100644 --- a/src/tests/ofc22/tests/test_functional_bootstrap.py +++ b/src/tests/ofc22/tests/test_functional_bootstrap.py @@ -12,27 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy, logging, pytest -from common.Settings import get_setting +import logging, time +from common.proto.context_pb2 import ContextId, Empty from common.proto.monitoring_pb2 import KpiDescriptorList -from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events +from common.tests.LoadScenario import load_scenario_from_descriptor +from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Device import json_device_id -from common.tools.object_factory.Link import json_link_id -from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient -from monitoring.client.MonitoringClient import MonitoringClient -from context.client.EventsCollector import EventsCollector -from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology from device.client.DeviceClient import DeviceClient -from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES -from tests.Fixtures import context_client, device_client, monitoring_client +from monitoring.client.MonitoringClient import MonitoringClient +from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' -def test_scenario_empty(context_client : ContextClient): # pylint: disable=redefined-outer-name +def test_scenario_bootstrap( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: # ----- List entities - Ensure database is empty ------------------------------------------------------------------- response = context_client.ListContexts(Empty()) assert len(response.contexts) == 0 @@ -44,161 +43,53 @@ def test_scenario_empty(context_client : ContextClient): # 
pylint: disable=rede assert len(response.links) == 0 -def test_prepare_scenario(context_client : ContextClient): # pylint: disable=redefined-outer-name - - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client) - #events_collector.start() - - #expected_events = [] - - # ----- Create Contexts and Topologies ----------------------------------------------------------------------------- - for context in CONTEXTS: - context_uuid = context['context_id']['context_uuid']['uuid'] - LOGGER.info('Adding Context {:s}'.format(context_uuid)) - response = context_client.SetContext(Context(**context)) - assert response.context_uuid.uuid == context_uuid - #expected_events.append(('ContextEvent', EVENT_CREATE, json_context_id(context_uuid))) - - for topology in TOPOLOGIES: - context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid'] - topology_uuid = topology['topology_id']['topology_uuid']['uuid'] - LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) - response = context_client.SetTopology(Topology(**topology)) - assert response.context_id.context_uuid.uuid == context_uuid - assert response.topology_uuid.uuid == topology_uuid - context_id = json_context_id(context_uuid) - #expected_events.append(('TopologyEvent', EVENT_CREATE, json_topology_id(topology_uuid, context_id=context_id))) + # ----- Load Scenario ---------------------------------------------------------------------------------------------- + descriptor_loader = load_scenario_from_descriptor( + DESCRIPTOR_FILE, context_client, device_client, None, None) - # ----- Validate Collected Events ---------------------------------------------------------------------------------- - #check_events(events_collector, expected_events) - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() - - 
-def test_scenario_ready(context_client : ContextClient): # pylint: disable=redefined-outer-name # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) - - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 - - -def test_devices_bootstraping( - context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name - - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client, log_events_received=True) - #events_collector.start() - - #expected_events = [] - - # ----- Create Devices and Validate Collected Events --------------------------------------------------------------- - for device, connect_rules in DEVICES: - device_uuid = device['device_id']['device_uuid']['uuid'] - LOGGER.info('Adding Device {:s}'.format(device_uuid)) - - device_with_connect_rules = copy.deepcopy(device) - device_with_connect_rules['device_config']['config_rules'].extend(connect_rules) - response = device_client.AddDevice(Device(**device_with_connect_rules)) - assert response.device_uuid.uuid == device_uuid - - #expected_events.extend([ - # # Device creation, update for automation to start the device - # ('DeviceEvent', EVENT_CREATE, json_device_id(device_uuid)), - # #('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)), - #]) - - #response = context_client.GetDevice(response) - #for endpoint in response.device_endpoints: - # for _ in 
endpoint.kpi_sample_types: - # # Monitoring configures monitoring for endpoint - # expected_events.append(('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid))) - - # ----- Validate Collected Events ---------------------------------------------------------------------------------- - #check_events(events_collector, expected_events) - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() - - -def test_devices_bootstrapped(context_client : ContextClient): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure bevices are created ----------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) - - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 - - -def test_links_creation(context_client : ContextClient): # pylint: disable=redefined-outer-name - - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client) - #events_collector.start() - - #expected_events = [] - - # ----- Create Links and Validate Collected Events ----------------------------------------------------------------- - for link in LINKS: - link_uuid = link['link_id']['link_uuid']['uuid'] - LOGGER.info('Adding Link {:s}'.format(link_uuid)) - response = context_client.SetLink(Link(**link)) - assert response.link_uuid.uuid == link_uuid - #expected_events.append(('LinkEvent', EVENT_CREATE, json_link_id(link_uuid))) - - # ----- Validate 
Collected Events ---------------------------------------------------------------------------------- - #check_events(events_collector, expected_events) - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() - - -def test_links_created(context_client : ContextClient): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure links are created ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) - - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 + assert len(response.links) == descriptor_loader.num_links + for context_uuid, _ in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 -def test_scenario_kpis_created(monitoring_client: MonitoringClient): +def test_scenario_kpis_created( + context_client : ContextClient, # pylint: disable=redefined-outer-name + monitoring_client: MonitoringClient, # pylint: disable=redefined-outer-name +) -> None: """ This test validates that KPIs related to the service/device/endpoint were created during the service 
creation process. """ - response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty()) - LOGGER.info("Number of KPIs created: {}".format(len(response.kpi_descriptor_list))) - # TODO: replace the magic number `16` below for a formula that adapts to the number - # of links and devices - assert len(response.kpi_descriptor_list) == 16 + response = context_client.ListDevices(Empty()) + kpis_expected = set() + for device in response.devices: + device_uuid = device.device_id.device_uuid.uuid + for endpoint in device.device_endpoints: + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + for kpi_sample_type in endpoint.kpi_sample_types: + kpis_expected.add((device_uuid, endpoint_uuid, kpi_sample_type)) + num_kpis_expected = len(kpis_expected) + LOGGER.info('Num KPIs expected: {:d}'.format(num_kpis_expected)) + + num_kpis_created, num_retry = 0, 0 + while (num_kpis_created != num_kpis_expected) and (num_retry < 5): + response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty()) + num_kpis_created = len(response.kpi_descriptor_list) + LOGGER.info('Num KPIs created: {:d}'.format(num_kpis_created)) + time.sleep(0.5) + num_retry += 1 + assert num_kpis_created == num_kpis_expected diff --git a/src/tests/ofc22/tests/test_functional_cleanup.py b/src/tests/ofc22/tests/test_functional_cleanup.py index b0dfe54900f5a806607fcd669942e7fa592dcbaa..be807eaa0242f2363b5b6c189ce4de264528a54c 100644 --- a/src/tests/ofc22/tests/test_functional_cleanup.py +++ b/src/tests/ofc22/tests/test_functional_cleanup.py @@ -12,93 +12,63 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging, pytest -from common.Settings import get_setting -from common.tests.EventTools import EVENT_REMOVE, check_events +import logging +from common.tools.descriptor.Loader import DescriptorLoader from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Device import json_device_id -from common.tools.object_factory.Link import json_link_id -from common.tools.object_factory.Topology import json_topology_id -from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId +from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient -from tests.Fixtures import context_client, device_client -from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +from tests.Fixtures import context_client, device_client # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' -def test_services_removed(context_client : ContextClient): # pylint: disable=redefined-outer-name + +def test_services_removed( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: # ----- List entities - Ensure service is removed ------------------------------------------------------------------ + with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: + descriptors = f.read() + + descriptor_loader = DescriptorLoader(descriptors) + response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in 
descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) - - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 - - -def test_scenario_cleanup( - context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name - - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client) - #events_collector.start() - - #expected_events = [] - - # ----- Delete Links and Validate Collected Events ----------------------------------------------------------------- - for link in LINKS: - link_id = link['link_id'] - link_uuid = link_id['link_uuid']['uuid'] - LOGGER.info('Deleting Link {:s}'.format(link_uuid)) - context_client.RemoveLink(LinkId(**link_id)) - #expected_events.append(('LinkEvent', EVENT_REMOVE, json_link_id(link_uuid))) - - # ----- Delete Devices and Validate Collected Events --------------------------------------------------------------- - for device, _ in DEVICES: - device_id = device['device_id'] - device_uuid = device_id['device_uuid']['uuid'] - LOGGER.info('Deleting Device {:s}'.format(device_uuid)) - device_client.DeleteDevice(DeviceId(**device_id)) - #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid))) - - # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------ - for topology in TOPOLOGIES: - topology_id = topology['topology_id'] - context_uuid = topology_id['context_id']['context_uuid']['uuid'] - topology_uuid = 
topology_id['topology_uuid']['uuid'] - LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) - context_client.RemoveTopology(TopologyId(**topology_id)) - context_id = json_context_id(context_uuid) - #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id))) - - # ----- Delete Contexts and Validate Collected Events -------------------------------------------------------------- - for context in CONTEXTS: - context_id = context['context_id'] - context_uuid = context_id['context_uuid']['uuid'] - LOGGER.info('Deleting Context {:s}'.format(context_uuid)) - context_client.RemoveContext(ContextId(**context_id)) - #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid))) - - # ----- Validate Collected Events ---------------------------------------------------------------------------------- - #check_events(events_collector, expected_events) - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() - - -def test_scenario_empty_again(context_client : ContextClient): # pylint: disable=redefined-outer-name + assert len(response.links) == descriptor_loader.num_links + + for context_uuid, _ in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 + + + # ----- Delete Links, Devices, Topologies, Contexts ---------------------------------------------------------------- + for link in descriptor_loader.links: + context_client.RemoveLink(LinkId(**link['link_id'])) + + for device in descriptor_loader.devices: + device_client .DeleteDevice(DeviceId(**device['device_id'])) + + for context_uuid, topology_list in descriptor_loader.topologies.items(): + for topology in topology_list: + context_client.RemoveTopology(TopologyId(**topology['topology_id'])) + + for context in 
descriptor_loader.contexts: + context_client.RemoveContext(ContextId(**context['context_id'])) + + # ----- List entities - Ensure database is empty again ------------------------------------------------------------- response = context_client.ListContexts(Empty()) assert len(response.contexts) == 0 diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py index 5615f119b91fba10dd767d7188b303f926750e06..e606d060d52631ba72e191d7c025bd7b43048b39 100644 --- a/src/tests/ofc22/tests/test_functional_create_service.py +++ b/src/tests/ofc22/tests/test_functional_create_service.py @@ -12,24 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, pytest, random, time +import logging, random from common.DeviceTypes import DeviceTypeEnum -from common.Settings import get_setting -from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events -from common.tools.object_factory.Connection import json_connection_id -from common.tools.object_factory.Device import json_device_id -from common.tools.object_factory.Service import json_service_id +from common.proto.context_pb2 import ContextId, Empty +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.tools.descriptor.Loader import DescriptorLoader from common.tools.grpc.Tools import grpc_message_to_json_string -from compute.tests.mock_osm.MockOSM import MockOSM +from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from monitoring.client.MonitoringClient import MonitoringClient -from context.client.EventsCollector import EventsCollector -from common.proto.context_pb2 import ContextId, Empty -from tests.Fixtures import context_client, monitoring_client -from .Fixtures import osm_wim -from .Objects import ( - CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, 
TOPOLOGIES, - WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE) +from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import +from tests.tools.mock_osm.MockOSM import MockOSM +from .Fixtures import osm_wim # pylint: disable=unused-import +from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) @@ -37,89 +32,69 @@ LOGGER.setLevel(logging.DEBUG) DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' + +def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- + with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: + descriptors = f.read() + + descriptor_loader = DescriptorLoader(descriptors) -def test_scenario_is_correct(context_client : ContextClient): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure links are created ------------------------------------------------------------------- response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) 
+ assert len(response.links) == descriptor_loader.num_links - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 + for context_uuid, num_services in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 -def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - # TODO: restablish the tests of the events - # events_collector = EventsCollector(context_client, log_events_received=True) - # events_collector.start() - # ----- Create Service --------------------------------------------------------------------------------------------- service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS) osm_wim.get_connectivity_service_status(service_uuid) - # ----- Validate collected events ---------------------------------------------------------------------------------- - - # packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR) - # optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS) - # optical_service_uuid = '{:s}:optical'.format(service_uuid) - - # expected_events = [ - # # Create packet service and add first endpoint - # ('ServiceEvent', EVENT_CREATE, json_service_id(service_uuid, context_id=CONTEXT_ID)), - # ('ServiceEvent', EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)), - - # # Configure OLS controller, create optical service, create optical connection - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)), - # ('ServiceEvent', EVENT_CREATE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)), - # ('ConnectionEvent', EVENT_CREATE, json_connection_id(optical_connection_uuid)), - - # # Configure endpoint packet 
devices, add second endpoint to service, create connection - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)), - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)), - # ('ServiceEvent', EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)), - # ('ConnectionEvent', EVENT_CREATE, json_connection_id(packet_connection_uuid)), - # ] - # check_events(events_collector, expected_events) - - # # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - # events_collector.stop() - - -def test_scenario_service_created(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure service is created ------------------------------------------------------------------ response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) + assert len(response.links) == descriptor_loader.num_links + + for context_uuid, num_services in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 2*num_services # OLS & L3NM => (L3NM + TAPI) - response = 
context_client.ListServices(ContextId(**CONTEXT_ID)) - LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2 # L3NM + TAPI - for service in response.services: - service_id = service.service_id - response = context_client.ListConnections(service_id) - LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( - grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) - assert len(response.connections) == 1 # one connection per service + for service in response.services: + service_id = service.service_id + response = context_client.ListConnections(service_id) + LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response))) + assert len(response.connections) == 1 # one connection per service -def test_scenario_kpi_values_created(monitoring_client: MonitoringClient): +def test_scenario_kpi_values_created( + monitoring_client: MonitoringClient, # pylint: disable=redefined-outer-name +) -> None: """ This test validates that KPI values have been inserted into the monitoring database. We short k KPI descriptors to test. @@ -128,6 +103,22 @@ def test_scenario_kpi_values_created(monitoring_client: MonitoringClient): kpi_descriptors = random.choices(response.kpi_descriptor_list, k=2) for kpi_descriptor in kpi_descriptors: + MSG = 'KPI(kpi_uuid={:s}, device_uuid={:s}, endpoint_uuid={:s}, service_uuid={:s}, kpi_sample_type={:s})...' 
+ LOGGER.info(MSG.format( + str(kpi_descriptor.kpi_id.kpi_id.uuid), str(kpi_descriptor.device_id.device_uuid.uuid), + str(kpi_descriptor.endpoint_id.endpoint_uuid.uuid), str(kpi_descriptor.service_id.service_uuid.uuid), + str(KpiSampleType.Name(kpi_descriptor.kpi_sample_type)))) response = monitoring_client.GetInstantKpi(kpi_descriptor.kpi_id) - assert response.kpi_id.kpi_id.uuid == kpi_descriptor.kpi_id.kpi_id.uuid - assert response.timestamp.timestamp > 0 + kpi_uuid = response.kpi_id.kpi_id.uuid + assert kpi_uuid == kpi_descriptor.kpi_id.kpi_id.uuid + kpi_value_type = response.kpi_value.WhichOneof('value') + if kpi_value_type is None: + MSG = ' KPI({:s}): No instant value found' + LOGGER.warning(MSG.format(str(kpi_uuid))) + else: + kpi_timestamp = response.timestamp.timestamp + assert kpi_timestamp > 0 + assert kpi_value_type == 'floatVal' + kpi_value = getattr(response.kpi_value, kpi_value_type) + MSG = ' KPI({:s}): timestamp={:s} value_type={:s} value={:s}' + LOGGER.info(MSG.format(str(kpi_uuid), str(kpi_timestamp), str(kpi_value_type), str(kpi_value))) diff --git a/src/tests/ofc22/tests/test_functional_create_service_xr.py b/src/tests/ofc22/tests/test_functional_create_service_xr.py new file mode 100644 index 0000000000000000000000000000000000000000..bb78abc1efe7701308448ad4b83ef2a6e32079c4 --- /dev/null +++ b/src/tests/ofc22/tests/test_functional_create_service_xr.py @@ -0,0 +1,129 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, pytest +from common.DeviceTypes import DeviceTypeEnum +from common.Settings import get_setting +from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events +from common.tools.object_factory.Connection import json_connection_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Service import json_service_id +from common.tools.grpc.Tools import grpc_message_to_json_string +from compute.tests.mock_osm.MockOSM import MockOSM +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from common.proto.context_pb2 import ContextId, Empty +from .ObjectsXr import ( + CONTEXT_ID, CONTEXTS, DEVICE_X1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, + WIM_MAPPING, WIM_PASSWORD, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE, WIM_USERNAME) + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value +DEVTYPE_XR_CONSTELLATION = DeviceTypeEnum.XR_CONSTELLATION.value + + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + + +@pytest.fixture(scope='session') +def osm_wim(): + wim_url = 'http://{:s}:{:s}'.format( + get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) + return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) + + +def test_scenario_is_correct(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure links are created ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == 
len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == len(LINKS) + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 + + +def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name + # ----- Start the EventsCollector ---------------------------------------------------------------------------------- + # events_collector = EventsCollector(context_client, log_events_received=True) + # events_collector.start() + + # ----- Create Service --------------------------------------------------------------------------------------------- + service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS) + osm_wim.get_connectivity_service_status(service_uuid) + + # ----- Validate collected events ---------------------------------------------------------------------------------- + + # packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR) + # optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_XR_CONSTELLATION) + # optical_service_uuid = '{:s}:optical'.format(service_uuid) + + # expected_events = [ + # # Create packet service and add first endpoint + # ('ServiceEvent', EVENT_CREATE, json_service_id(service_uuid, context_id=CONTEXT_ID)), + # ('ServiceEvent', EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)), + + # # Configure OLS controller, create optical service, create optical connection + # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_X1_UUID)), + # ('ServiceEvent', EVENT_CREATE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)), + # ('ConnectionEvent', EVENT_CREATE, 
json_connection_id(optical_connection_uuid)), + + # # Configure endpoint packet devices, add second endpoint to service, create connection + # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)), + # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)), + # ('ServiceEvent', EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)), + # ('ConnectionEvent', EVENT_CREATE, json_connection_id(packet_connection_uuid)), + # ] + # check_events(events_collector, expected_events) + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + # events_collector.stop() + + +def test_scenario_service_created(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure service is created ------------------------------------------------------------------ + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == len(LINKS) + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 2 # L3NM + TAPI + for service in response.services: + service_id = service.service_id + response = context_client.ListConnections(service_id) + LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + assert len(response.connections) == 1 # one connection per service diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py 
b/src/tests/ofc22/tests/test_functional_delete_service.py index 5d9568cd81906ac76b600a2253a5e0bdf741bc01..0f8d088012bed164e4603a813bfe9154eda8f568 100644 --- a/src/tests/ofc22/tests/test_functional_delete_service.py +++ b/src/tests/ofc22/tests/test_functional_delete_service.py @@ -12,23 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, pytest +import logging +from common.Constants import DEFAULT_CONTEXT_UUID from common.DeviceTypes import DeviceTypeEnum -from common.Settings import get_setting -from common.tests.EventTools import EVENT_REMOVE, EVENT_UPDATE, check_events -from common.tools.object_factory.Connection import json_connection_id -from common.tools.object_factory.Device import json_device_id -from common.tools.object_factory.Service import json_service_id +from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader +from common.tools.object_factory.Context import json_context_id from common.tools.grpc.Tools import grpc_message_to_json_string -from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector -from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum -from tests.Fixtures import context_client -from .Fixtures import osm_wim -from .Objects import ( - CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING, - WIM_PASSWORD, WIM_USERNAME) +from tests.Fixtures import context_client # pylint: disable=unused-import +from tests.tools.mock_osm.MockOSM import MockOSM +from .Fixtures import osm_wim # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) @@ -37,86 +31,69 @@ LOGGER.setLevel(logging.DEBUG) DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value 
+DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' -def test_scenario_is_correct(context_client : ContextClient): # pylint: disable=redefined-outer-name + +def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name # ----- List entities - Ensure service is created ------------------------------------------------------------------ + with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: + descriptors = f.read() + + descriptor_loader = DescriptorLoader(descriptors) + response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) + assert len(response.links) == descriptor_loader.num_links - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2 # L3NM + TAPI + l3nm_service_uuids = set() + response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))) + assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) for service in response.services: service_id = service.service_id + + if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: + service_uuid = service_id.service_uuid.uuid + l3nm_service_uuids.add(service_uuid) + 
osm_wim.conn_info[service_uuid] = {} + response = context_client.ListConnections(service_id) LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( - grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response))) assert len(response.connections) == 1 # one connection per service + # Identify service to delete + assert len(l3nm_service_uuids) == 1 # assume a single L3NM service has been created + l3nm_service_uuid = set(l3nm_service_uuids).pop() -def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client, log_events_received=True) - #events_collector.start() # ----- Delete Service --------------------------------------------------------------------------------------------- - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2 # L3NM + TAPI - service_uuids = set() - for service in response.services: - if service.service_type != ServiceTypeEnum.SERVICETYPE_L3NM: continue - service_uuid = service.service_id.service_uuid.uuid - service_uuids.add(service_uuid) - osm_wim.conn_info[service_uuid] = {} - - assert len(service_uuids) == 1 # assume a single L3NM service has been created - service_uuid = set(service_uuids).pop() - - osm_wim.delete_connectivity_service(service_uuid) - - # ----- Validate collected events ---------------------------------------------------------------------------------- - #packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR) - #optical_connection_uuid = 
'{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS) - #optical_service_uuid = '{:s}:optical'.format(service_uuid) - - #expected_events = [ - # ('ConnectionEvent', EVENT_REMOVE, json_connection_id(packet_connection_uuid)), - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)), - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)), - # ('ServiceEvent', EVENT_REMOVE, json_service_id(service_uuid, context_id=CONTEXT_ID)), - # ('ConnectionEvent', EVENT_REMOVE, json_connection_id(optical_connection_uuid)), - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)), - # ('ServiceEvent', EVENT_REMOVE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)), - #] - #check_events(events_collector, expected_events) - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() + osm_wim.delete_connectivity_service(l3nm_service_uuid) -def test_services_removed(context_client : ContextClient): # pylint: disable=redefined-outer-name # ----- List entities - Ensure service is removed ------------------------------------------------------------------ response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) + assert len(response.links) == descriptor_loader.num_links - response = 
context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 + for context_uuid, num_services in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 diff --git a/src/tests/ofc22/tests/test_functional_delete_service_xr.py b/src/tests/ofc22/tests/test_functional_delete_service_xr.py new file mode 100644 index 0000000000000000000000000000000000000000..f28828be056e755058a0f6b15bd8ea3e9acbbdeb --- /dev/null +++ b/src/tests/ofc22/tests/test_functional_delete_service_xr.py @@ -0,0 +1,133 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, pytest +from common.DeviceTypes import DeviceTypeEnum +from common.Settings import get_setting +from common.tests.EventTools import EVENT_REMOVE, EVENT_UPDATE, check_events +from common.tools.object_factory.Connection import json_connection_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Service import json_service_id +from common.tools.grpc.Tools import grpc_message_to_json_string +from compute.tests.mock_osm.MockOSM import MockOSM +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from .ObjectsXr import ( + CONTEXT_ID, CONTEXTS, DEVICE_X1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING, + WIM_PASSWORD, WIM_USERNAME) + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value +DEVTYPE_XR_CONSTELLATION = DeviceTypeEnum.XR_CONSTELLATION.value + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + + +@pytest.fixture(scope='session') +def osm_wim(): + wim_url = 'http://{:s}:{:s}'.format( + get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) + return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) + + +def test_scenario_is_correct(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure service is created ------------------------------------------------------------------ + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + 
response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == len(LINKS) + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 2 # L3NM + TAPI + for service in response.services: + service_id = service.service_id + response = context_client.ListConnections(service_id) + LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + assert len(response.connections) == 1 # one connection per service + + +def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name + # ----- Start the EventsCollector ---------------------------------------------------------------------------------- + events_collector = EventsCollector(context_client, log_events_received=True) + events_collector.start() + + # ----- Delete Service --------------------------------------------------------------------------------------------- + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 2 # L3NM + TAPI + service_uuids = set() + for service in response.services: + if service.service_type != ServiceTypeEnum.SERVICETYPE_L3NM: continue + service_uuid = service.service_id.service_uuid.uuid + service_uuids.add(service_uuid) + osm_wim.conn_info[service_uuid] = {} + + assert len(service_uuids) == 1 # assume a single service has been created + service_uuid = set(service_uuids).pop() + + osm_wim.delete_connectivity_service(service_uuid) + + # ----- Validate collected events 
---------------------------------------------------------------------------------- + # packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR) + # optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_XR_CONSTELLATION) + # optical_service_uuid = '{:s}:optical'.format(service_uuid) + + # expected_events = [ + # ('ConnectionEvent', EVENT_REMOVE, json_connection_id(packet_connection_uuid)), + # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)), + # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)), + # ('ServiceEvent', EVENT_REMOVE, json_service_id(service_uuid, context_id=CONTEXT_ID)), + # ('ConnectionEvent', EVENT_REMOVE, json_connection_id(optical_connection_uuid)), + # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_X1_UUID)), + # ('ServiceEvent', EVENT_REMOVE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)), + # ] + # check_events(events_collector, expected_events) + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + # events_collector.stop() + + +def test_services_removed(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure service is removed ------------------------------------------------------------------ + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == len(LINKS) + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 diff --git a/src/tests/scenario2/MultiIngressController.txt b/src/tests/scenario2/MultiIngressController.txt new file mode 100644 index 
0000000000000000000000000000000000000000..b2d6d322465cb1d776b043e5de4dd474d2f0d9c6 --- /dev/null +++ b/src/tests/scenario2/MultiIngressController.txt @@ -0,0 +1,35 @@ +# Ref: https://kubernetes.github.io/ingress-nginx/user-guide/multiple-ingress/ +# Ref: https://fabianlee.org/2021/07/29/kubernetes-microk8s-with-multiple-metallb-endpoints-and-nginx-ingress-controllers/ + +# Check node limits +kubectl describe nodes + +# Create secondary ingress controllers +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml + +# Delete secondary ingress controllers +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml + +source nfvsdn22/deploy_specs_dom1.sh +./deploy.sh + +source nfvsdn22/deploy_specs_dom2.sh +./deploy.sh + +source nfvsdn22/deploy_specs_dom3.sh +./deploy.sh + +source nfvsdn22/deploy_specs_dom4.sh +./deploy.sh + +# Manually deploy ingresses for domains +kubectl --namespace tfs-dom1 apply -f nfvsdn22/tfs-ingress-dom1.yaml +kubectl --namespace tfs-dom2 apply -f nfvsdn22/tfs-ingress-dom2.yaml +kubectl --namespace tfs-dom3 apply -f nfvsdn22/tfs-ingress-dom3.yaml +kubectl --namespace tfs-dom4 apply -f nfvsdn22/tfs-ingress-dom4.yaml diff --git a/src/tests/scenario2/Scenario.md b/src/tests/scenario2/Scenario.md new file mode 100644 index 0000000000000000000000000000000000000000..8dad4691ade669522b5c82a5e4ed07e5d0279492 --- /dev/null +++ b/src/tests/scenario2/Scenario.md @@ -0,0 +1,47 @@ +# Scenario: + +- 4 TFS instances + + - domain D1 (source for e-2-e service) + 5 routers + 1 DC + R1@D1/2 <--> R2@D1/1 + R2@D1/3 <--> R3@D1/2 + R2@D1/5 <--> R5@D1/2 + R3@D1/4 <--> R4@D1/3 + R4@D1/5 <--> 
R5@D1/4 + R5@D1/1 <--> R1@D1/5 + R1@D1/100 <--> DCGW@D1/eth1 + + - domain D2 (transit for e-2-e service) + 6 routers + R1@D2/2 <--> R2@D2/1 + R1@D2/6 <--> R6@D2/1 + R1@D2/5 <--> R5@D2/1 + R2@D2/3 <--> R3@D2/2 + R2@D2/4 <--> R4@D2/2 + R2@D2/5 <--> R5@D2/2 + R2@D2/6 <--> R6@D2/2 + R3@D2/6 <--> R6@D2/3 + R4@D2/5 <--> R5@D2/4 + + - domain D3 (transit for e-2-e service) + 4 routers + R1@D3/2 <--> R2@D3/1 + R2@D3/3 <--> R3@D3/2 + R3@D3/4 <--> R4@D3/3 + R4@D3/1 <--> R1@D3/4 + R2@D3/4 <--> R4@D3/2 + + - domain D4 (end for e-2-e service) + 3 routers + R1@D4/2 <--> R2@D4/1 + R1@D4/3 <--> R3@D4/1 + R2@D4/3 <--> R3@D4/2 + R3@D4/100 <--> DCGW@D4/eth1 + + - interdomain links + R4@D1/10 <--> R1@D2/10 + R5@D1/10 <--> R1@D3/10 + R4@D2/10 <--> R2@D4/10 + R5@D2/10 <--> R2@D3/10 + R3@D3/10 <--> R1@D4/10 diff --git a/src/tests/scenario2/delete_all.sh b/src/tests/scenario2/delete_all.sh new file mode 100755 index 0000000000000000000000000000000000000000..5d3e55831c85a3ef547d8e02a29f507663bfa789 --- /dev/null +++ b/src/tests/scenario2/delete_all.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Delete old namespaces +kubectl delete namespace tfs-dom1 tfs-dom2 tfs-dom3 tfs-dom4 tfs-bchain + +# Delete secondary ingress controllers +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml diff --git a/src/tests/scenario2/deploy_all.sh b/src/tests/scenario2/deploy_all.sh new file mode 100755 index 0000000000000000000000000000000000000000..582a97ac57f624de93e5865b7dcb190a6797bd5b --- /dev/null +++ b/src/tests/scenario2/deploy_all.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Delete old namespaces +kubectl delete namespace tfs-dom1 tfs-dom2 tfs-dom3 tfs-dom4 + +# Delete secondary ingress controllers +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml 
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml + +# Delete MockBlockchain +#kubectl delete namespace tfs-bchain + +# Create secondary ingress controllers +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml + +# Create MockBlockchain +#./deploy_mock_blockchain.sh + +# Deploy TFS for Domain 1 +source nfvsdn22/deploy_specs_dom1.sh +./deploy.sh +mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom1.sh + +# Deploy TFS for Domain 2 +source nfvsdn22/deploy_specs_dom2.sh +./deploy.sh +mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom2.sh + +# Deploy TFS for Domain 3 +source nfvsdn22/deploy_specs_dom3.sh +./deploy.sh +mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom3.sh + +# Deploy TFS for Domain 4 +source nfvsdn22/deploy_specs_dom4.sh +./deploy.sh +mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom4.sh diff --git a/src/tests/scenario2/deploy_specs_dom1.sh b/src/tests/scenario2/deploy_specs_dom1.sh new file mode 100644 index 0000000000000000000000000000000000000000..06d32e005f36d883c44d195ccfd20ec9b7e9a4b8 --- /dev/null +++ b/src/tests/scenario2/deploy_specs_dom1.sh @@ -0,0 +1,21 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. 
+export TFS_K8S_NAMESPACE="tfs-dom1" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom1.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD="NO" diff --git a/src/tests/scenario2/deploy_specs_dom2.sh b/src/tests/scenario2/deploy_specs_dom2.sh new file mode 100644 index 0000000000000000000000000000000000000000..df1726cd31606ada5d2a33d50550b52c02ccbee4 --- /dev/null +++ b/src/tests/scenario2/deploy_specs_dom2.sh @@ -0,0 +1,21 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE="tfs-dom2" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom2.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD="YES" diff --git a/src/tests/scenario2/deploy_specs_dom3.sh b/src/tests/scenario2/deploy_specs_dom3.sh new file mode 100644 index 0000000000000000000000000000000000000000..027762e3e70d0d1cd76b8d3303ae17c97ea781c7 --- /dev/null +++ b/src/tests/scenario2/deploy_specs_dom3.sh @@ -0,0 +1,21 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. 
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE="tfs-dom3" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom3.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD="YES" diff --git a/src/tests/scenario2/deploy_specs_dom4.sh b/src/tests/scenario2/deploy_specs_dom4.sh new file mode 100644 index 0000000000000000000000000000000000000000..a09e9fa899a0ca9fc941fd09496113a20aebbe59 --- /dev/null +++ b/src/tests/scenario2/deploy_specs_dom4.sh @@ -0,0 +1,21 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE="tfs-dom4" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom4.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. 
+export TFS_SKIP_BUILD="YES" diff --git a/src/tests/scenario2/descriptors/domain1.json b/src/tests/scenario2/descriptors/domain1.json new file mode 100644 index 0000000000000000000000000000000000000000..043b3955f017631203a437cf853c3617cddf93c8 --- /dev/null +++ b/src/tests/scenario2/descriptors/domain1.json @@ -0,0 +1,148 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}} + ], "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}, + "device_ids": [ + {"device_uuid": {"uuid": "DC1"}}, + {"device_uuid": {"uuid": "R1@D1"}}, + {"device_uuid": {"uuid": "R2@D1"}}, + {"device_uuid": {"uuid": "R3@D1"}}, + {"device_uuid": {"uuid": "R4@D1"}}, + {"device_uuid": {"uuid": "R5@D1"}} + ], "link_ids": [ + {"link_uuid": {"uuid": "DC1/D1==R1@D1/DC1"}}, + {"link_uuid": {"uuid": "R1@D1/2==R2@D1/1"}}, + {"link_uuid": {"uuid": "R2@D1/3==R3@D1/2"}}, + {"link_uuid": {"uuid": "R2@D1/5==R5@D1/2"}}, + {"link_uuid": {"uuid": "R3@D1/4==R4@D1/3"}}, + {"link_uuid": {"uuid": "R4@D1/5==R5@D1/4"}}, + {"link_uuid": {"uuid": "R5@D1/1==R1@D1/5"}} + ] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/border", "uuid": "D1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "int"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R1@D1"}}, "device_type": 
"emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"}, + {"sample_types": [], "type": "copper/border", "uuid": "DC1"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": 
"R4@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"}, + {"sample_types": [], "type": "copper/border", "uuid": "D2"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R5@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/border", "uuid": "D3"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "DC1/D1==R1@D1/DC1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "D1"}}, + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "DC1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1@D1/2==R2@D1/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": 
"R2@D1/3==R3@D1/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D1/5==R5@D1/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3@D1/4==R4@D1/3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "4"}}, + {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4@D1/5==R5@D1/4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "4"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R5@D1/1==R1@D1/5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "5"}} + ] + } + ] +} diff --git a/src/tests/scenario2/descriptors/domain2.json b/src/tests/scenario2/descriptors/domain2.json new file mode 100644 index 0000000000000000000000000000000000000000..81d397abfd3571b1177a06172188b00eed2f3afc --- /dev/null +++ b/src/tests/scenario2/descriptors/domain2.json @@ -0,0 +1,166 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}} + ], "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}, + "device_ids": [ + {"device_uuid": {"uuid": "R1@D2"}}, + {"device_uuid": 
{"uuid": "R2@D2"}}, + {"device_uuid": {"uuid": "R3@D2"}}, + {"device_uuid": {"uuid": "R4@D2"}}, + {"device_uuid": {"uuid": "R5@D2"}}, + {"device_uuid": {"uuid": "R6@D2"}} + ], "link_ids": [ + {"link_uuid": {"uuid": "R1@D2/2==R2@D2/1"}}, + {"link_uuid": {"uuid": "R1@D2/6==R6@D2/1"}}, + {"link_uuid": {"uuid": "R1@D2/5==R5@D2/1"}}, + {"link_uuid": {"uuid": "R2@D2/3==R3@D2/2"}}, + {"link_uuid": {"uuid": "R2@D2/4==R4@D2/2"}}, + {"link_uuid": {"uuid": "R2@D2/5==R5@D2/2"}}, + {"link_uuid": {"uuid": "R2@D2/6==R6@D2/2"}}, + {"link_uuid": {"uuid": "R3@D2/6==R6@D2/3"}}, + {"link_uuid": {"uuid": "R4@D2/5==R5@D2/4"}} + ] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "R1@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"}, + {"sample_types": [], "type": "copper/internal", "uuid": "6"}, + {"sample_types": [], "type": "copper/border", "uuid": "D1"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", 
"uuid": "3"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"}, + {"sample_types": [], "type": "copper/internal", "uuid": "6"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "6"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"}, + {"sample_types": [], "type": "copper/border", "uuid": "D4"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R5@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", 
"resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/border", "uuid": "D3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R6@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R1@D2/2==R2@D2/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1@D2/6==R6@D2/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "6"}}, + {"device_id": {"device_uuid": {"uuid": "R6@D2"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1@D2/5==R5@D2/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "R5@D2"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D2/3==R3@D2/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": 
{"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D2/4==R4@D2/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "4"}}, + {"device_id": {"device_uuid": {"uuid": "R4@D2"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D2/5==R5@D2/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "R5@D2"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D2/6==R6@D2/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "6"}}, + {"device_id": {"device_uuid": {"uuid": "R6@D2"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3@D2/6==R6@D2/3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "6"}}, + {"device_id": {"device_uuid": {"uuid": "R6@D2"}}, "endpoint_uuid": {"uuid": "3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4@D2/5==R5@D2/4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4@D2"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "R5@D2"}}, "endpoint_uuid": {"uuid": "4"}} + ] + } + ] +} diff --git a/src/tests/scenario2/descriptors/domain3.json b/src/tests/scenario2/descriptors/domain3.json new file mode 100644 index 0000000000000000000000000000000000000000..3a8e47d30dcef471b388f46d4ba5df5df4716256 --- /dev/null +++ b/src/tests/scenario2/descriptors/domain3.json @@ -0,0 +1,110 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D3"}} + ], "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"context_id": {"context_uuid": {"uuid": 
"admin"}}, "topology_uuid": {"uuid": "D3"}}, + "device_ids": [ + {"device_uuid": {"uuid": "R1@D3"}}, + {"device_uuid": {"uuid": "R2@D3"}}, + {"device_uuid": {"uuid": "R3@D3"}}, + {"device_uuid": {"uuid": "R4@D3"}} + ], "link_ids": [ + {"link_uuid": {"uuid": "R1@D3/2==R2@D3/1"}}, + {"link_uuid": {"uuid": "R2@D3/3==R3@D3/2"}}, + {"link_uuid": {"uuid": "R3@D3/4==R4@D3/3"}}, + {"link_uuid": {"uuid": "R4@D3/1==R1@D3/4"}}, + {"link_uuid": {"uuid": "R2@D3/4==R4@D3/2"}} + ] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "R1@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/border", "uuid": "D1"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/border", "uuid": "D2"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": 
"R3@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/border", "uuid": "D4"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R1@D3/2==R2@D3/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D3"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "R2@D3"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D3/3==R3@D3/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D3"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D3"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3@D3/4==R4@D3/3"}}, "link_endpoint_ids": [ + {"device_id": 
{"device_uuid": {"uuid": "R3@D3"}}, "endpoint_uuid": {"uuid": "4"}}, + {"device_id": {"device_uuid": {"uuid": "R4@D3"}}, "endpoint_uuid": {"uuid": "3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4@D3/1==R1@D3/4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4@D3"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "R1@D3"}}, "endpoint_uuid": {"uuid": "4"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D3/4==R4@D3/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D3"}}, "endpoint_uuid": {"uuid": "4"}}, + {"device_id": {"device_uuid": {"uuid": "R4@D3"}}, "endpoint_uuid": {"uuid": "2"}} + ] + } + ] +} diff --git a/src/tests/scenario2/descriptors/domain4.json b/src/tests/scenario2/descriptors/domain4.json new file mode 100644 index 0000000000000000000000000000000000000000..d9e2d049ad2417beb96b8f3434ed9e94febb4808 --- /dev/null +++ b/src/tests/scenario2/descriptors/domain4.json @@ -0,0 +1,101 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D4"}} + ], "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D4"}}, + "device_ids": [ + {"device_uuid": {"uuid": "DC2"}}, + {"device_uuid": {"uuid": "R1@D4"}}, + {"device_uuid": {"uuid": "R2@D4"}}, + {"device_uuid": {"uuid": "R3@D4"}} + ], "link_ids": [ + {"link_uuid": {"uuid": "R3@D4/DC2==DC2/D4"}}, + {"link_uuid": {"uuid": "R1@D4/2==R2@D4/1"}}, + {"link_uuid": {"uuid": "R1@D4/3==R3@D4/1"}}, + {"link_uuid": {"uuid": "R2@D4/3==R3@D4/2"}} + ] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type": "emu-datacenter", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", 
"resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/border", "uuid": "D4"}, + {"sample_types": [], "type": "copper/internal", "uuid": "int"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R1@D4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/border", "uuid": "D3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2@D4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/border", "uuid": "D2"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3@D4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": 
"_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/border", "uuid": "DC2"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R3@D4/DC2==DC2/D4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "D4"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D4"}}, "endpoint_uuid": {"uuid": "DC2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1@D4/2==R2@D4/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D4"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "R2@D4"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1@D4/3==R3@D4/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D4"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D4"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D4/3==R3@D4/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D4"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D4"}}, "endpoint_uuid": {"uuid": "2"}} + ] + } + ] +} diff --git a/src/tests/scenario2/descriptors/idc-slice.json b/src/tests/scenario2/descriptors/idc-slice.json new file mode 100644 index 0000000000000000000000000000000000000000..634209284c00cc8602db2bf91e6088ca120710df --- /dev/null +++ b/src/tests/scenario2/descriptors/idc-slice.json @@ -0,0 +1,20 @@ +{ + "slices":[ + { + "slice_id":{"context_id":{"context_uuid":{"uuid":"admin"}},"slice_uuid":{"uuid":"idc-slice"}}, + "slice_endpoint_ids":[ + 
{"device_id":{"device_uuid":{"uuid":"DC1"}},"endpoint_uuid":{"uuid":"int"}}, + {"device_id":{"device_uuid":{"uuid":"DC2"}},"endpoint_uuid":{"uuid":"int"}} + ], + "slice_status":{"slice_status":1}, + "slice_service_ids":[], + "slice_subslice_ids":[], + "slice_constraints":[], + "slice_config":{"config_rules":[ + {"action":1,"custom":{"resource_key":"/settings","resource_value":"{}"}}, + {"action":1,"custom":{"resource_key":"/device[DC1]/endpoint[int]/settings","resource_value":"{}"}}, + {"action":1,"custom":{"resource_key":"/device[DC2]/endpoint[int]/settings","resource_value":"{}"}} + ]} + } + ] +} diff --git a/src/tests/scenario2/dump_logs.sh b/src/tests/scenario2/dump_logs.sh new file mode 100755 index 0000000000000000000000000000000000000000..c2298fd8ef735eab102d463391004a818c874b42 --- /dev/null +++ b/src/tests/scenario2/dump_logs.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +rm -rf tmp/exec + +echo "Collecting logs for MockBlockChain..." +mkdir -p tmp/exec/mbc +kubectl --namespace tfs-bchain logs deployments/mock-blockchain server > tmp/exec/mbc/mock-blockchain.log +printf "\n" + +echo "Collecting logs for Domain 1..." 
+mkdir -p tmp/exec/dom1 +kubectl --namespace tfs-dom1 logs deployments/contextservice server > tmp/exec/dom1/context.log +kubectl --namespace tfs-dom1 logs deployments/deviceservice server > tmp/exec/dom1/device.log +kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/exec/dom1/service.log +kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/exec/dom1/pathcomp-frontend.log +kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/exec/dom1/pathcomp-backend.log +kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/exec/dom1/slice.log +kubectl --namespace tfs-dom1 logs deployments/interdomainservice server > tmp/exec/dom1/interdomain.log +kubectl --namespace tfs-dom1 logs deployments/dltservice connector > tmp/exec/dom1/dlt-connector.log +kubectl --namespace tfs-dom1 logs deployments/dltservice gateway > tmp/exec/dom1/dlt-gateway.log +printf "\n" + +echo "Collecting logs for Domain 2..." +mkdir -p tmp/exec/dom2 +kubectl --namespace tfs-dom2 logs deployments/contextservice server > tmp/exec/dom2/context.log +kubectl --namespace tfs-dom2 logs deployments/deviceservice server > tmp/exec/dom2/device.log +kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/exec/dom2/service.log +kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/exec/dom2/pathcomp-frontend.log +kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/exec/dom2/pathcomp-backend.log +kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/exec/dom2/slice.log +kubectl --namespace tfs-dom2 logs deployments/interdomainservice server > tmp/exec/dom2/interdomain.log +kubectl --namespace tfs-dom2 logs deployments/dltservice connector > tmp/exec/dom2/dlt-connector.log +kubectl --namespace tfs-dom2 logs deployments/dltservice gateway > tmp/exec/dom2/dlt-gateway.log +printf "\n" + +echo "Collecting logs for Domain 3..." 
+mkdir -p tmp/exec/dom3 +kubectl --namespace tfs-dom3 logs deployments/contextservice server > tmp/exec/dom3/context.log +kubectl --namespace tfs-dom3 logs deployments/deviceservice server > tmp/exec/dom3/device.log +kubectl --namespace tfs-dom3 logs deployments/serviceservice server > tmp/exec/dom3/service.log +kubectl --namespace tfs-dom3 logs deployments/pathcompservice frontend > tmp/exec/dom3/pathcomp-frontend.log +kubectl --namespace tfs-dom3 logs deployments/pathcompservice backend > tmp/exec/dom3/pathcomp-backend.log +kubectl --namespace tfs-dom3 logs deployments/sliceservice server > tmp/exec/dom3/slice.log +kubectl --namespace tfs-dom3 logs deployments/interdomainservice server > tmp/exec/dom3/interdomain.log +kubectl --namespace tfs-dom3 logs deployments/dltservice connector > tmp/exec/dom3/dlt-connector.log +kubectl --namespace tfs-dom3 logs deployments/dltservice gateway > tmp/exec/dom3/dlt-gateway.log +printf "\n" + +echo "Collecting logs for Domain 4..." +mkdir -p tmp/exec/dom4 +kubectl --namespace tfs-dom4 logs deployments/contextservice server > tmp/exec/dom4/context.log +kubectl --namespace tfs-dom4 logs deployments/deviceservice server > tmp/exec/dom4/device.log +kubectl --namespace tfs-dom4 logs deployments/serviceservice server > tmp/exec/dom4/service.log +kubectl --namespace tfs-dom4 logs deployments/pathcompservice frontend > tmp/exec/dom4/pathcomp-frontend.log +kubectl --namespace tfs-dom4 logs deployments/pathcompservice backend > tmp/exec/dom4/pathcomp-backend.log +kubectl --namespace tfs-dom4 logs deployments/sliceservice server > tmp/exec/dom4/slice.log +kubectl --namespace tfs-dom4 logs deployments/interdomainservice server > tmp/exec/dom4/interdomain.log +kubectl --namespace tfs-dom4 logs deployments/dltservice connector > tmp/exec/dom4/dlt-connector.log +kubectl --namespace tfs-dom4 logs deployments/dltservice gateway > tmp/exec/dom4/dlt-gateway.log +printf "\n" + +echo "Done!" 
diff --git a/src/tests/scenario2/fast_redeploy.sh b/src/tests/scenario2/fast_redeploy.sh new file mode 100644 index 0000000000000000000000000000000000000000..c4880a5afb1e5f40f0848437f51d39447c2c0673 --- /dev/null +++ b/src/tests/scenario2/fast_redeploy.sh @@ -0,0 +1,109 @@ +#!/bin/bash + +kubectl delete namespace tfs-dom1 tfs-dom2 tfs-dom3 tfs-dom4 + +echo "Deploying tfs-dom1 ..." +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl create namespace tfs-dom1 > ./tmp/logs/deploy-tfs-dom1.log +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/contextservice.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/deviceservice.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/pathcompservice.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/serviceservice.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/sliceservice.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/dltservice.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/interdomainservice.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/webuiservice.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f nfvsdn22/tfs-ingress-dom1.yaml > ./tmp/logs/deploy-tfs-dom1.log +printf "\n" + +echo "Deploying tfs-dom2 ..." 
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl create namespace tfs-dom2 > ./tmp/logs/deploy-tfs-dom2.log +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/contextservice.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/deviceservice.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/pathcompservice.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/serviceservice.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/sliceservice.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/dltservice.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/interdomainservice.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/webuiservice.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f nfvsdn22/tfs-ingress-dom2.yaml > ./tmp/logs/deploy-tfs-dom2.log +printf "\n" + +echo "Deploying tfs-dom3 ..." 
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl create namespace tfs-dom3 > ./tmp/logs/deploy-tfs-dom3.log +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/contextservice.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/deviceservice.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/pathcompservice.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/serviceservice.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/sliceservice.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/dltservice.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/interdomainservice.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/webuiservice.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f nfvsdn22/tfs-ingress-dom3.yaml > ./tmp/logs/deploy-tfs-dom3.log +printf "\n" + +echo "Deploying tfs-dom4 ..." 
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl create namespace tfs-dom4 > ./tmp/logs/deploy-tfs-dom4.log +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/contextservice.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/deviceservice.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/pathcompservice.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/serviceservice.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/sliceservice.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/dltservice.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/interdomainservice.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/webuiservice.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f nfvsdn22/tfs-ingress-dom4.yaml > ./tmp/logs/deploy-tfs-dom4.log +printf "\n" + +echo "Waiting tfs-dom1 ..." 
+kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/contextservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/deviceservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/pathcompservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/serviceservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/sliceservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/dltservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/interdomainservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/webuiservice +printf "\n" + +echo "Waiting tfs-dom2 ..." +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/contextservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/deviceservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/pathcompservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/serviceservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/sliceservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/dltservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/interdomainservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/webuiservice +printf "\n" + +echo "Waiting tfs-dom3 ..." 
+kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/contextservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/deviceservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/pathcompservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/serviceservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/sliceservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/dltservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/interdomainservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/webuiservice +printf "\n" + +echo "Waiting tfs-dom4 ..." +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/contextservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/deviceservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/pathcompservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/serviceservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/sliceservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/dltservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/interdomainservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/webuiservice +printf "\n" + +echo "Done!" 
diff --git a/src/tests/scenario2/nginx-ingress-controller-dom1.yaml b/src/tests/scenario2/nginx-ingress-controller-dom1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1aa1ba48be1bc78e5b0b349dd821e18f80b6953a --- /dev/null +++ b/src/tests/scenario2/nginx-ingress-controller-dom1.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-load-balancer-microk8s-conf-dom1 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-udp-microk8s-conf-dom1 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-tcp-microk8s-conf-dom1 + namespace: ingress +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: tfs-ingress-class-dom1 + annotations: + ingressclass.kubernetes.io/is-default-class: "false" +spec: + controller: tfs.etsi.org/controller-class-dom1 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nginx-ingress-microk8s-controller-dom1 + namespace: ingress + labels: + microk8s-application: nginx-ingress-microk8s-dom1 +spec: + selector: + matchLabels: + name: nginx-ingress-microk8s-dom1 + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + name: nginx-ingress-microk8s-dom1 + spec: + terminationGracePeriodSeconds: 60 + restartPolicy: Always + serviceAccountName: nginx-ingress-microk8s-serviceaccount + containers: + - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0 + imagePullPolicy: IfNotPresent + name: nginx-ingress-microk8s + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + securityContext: + 
capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 # www-data + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + hostPort: 8001 + protocol: TCP + - name: https + containerPort: 443 + hostPort: 4431 + protocol: TCP + - name: health + containerPort: 10254 + hostPort: 12541 + protocol: TCP + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom1 + - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom1 + - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom1 + - --election-id=ingress-controller-leader-dom1 + - --controller-class=tfs.etsi.org/controller-class-dom1 + - --ingress-class=tfs-ingress-class-dom1 + - ' ' + - --publish-status-address=127.0.0.1 diff --git a/src/tests/scenario2/nginx-ingress-controller-dom2.yaml b/src/tests/scenario2/nginx-ingress-controller-dom2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2dac1ecd26a5fd1c679b8e92ae28b51797987b71 --- /dev/null +++ b/src/tests/scenario2/nginx-ingress-controller-dom2.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-load-balancer-microk8s-conf-dom2 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-udp-microk8s-conf-dom2 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-tcp-microk8s-conf-dom2 + namespace: ingress +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: tfs-ingress-class-dom2 + annotations: + ingressclass.kubernetes.io/is-default-class: "false" +spec: + controller: tfs.etsi.org/controller-class-dom2 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nginx-ingress-microk8s-controller-dom2 + namespace: ingress + 
labels: + microk8s-application: nginx-ingress-microk8s-dom2 +spec: + selector: + matchLabels: + name: nginx-ingress-microk8s-dom2 + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + name: nginx-ingress-microk8s-dom2 + spec: + terminationGracePeriodSeconds: 60 + restartPolicy: Always + serviceAccountName: nginx-ingress-microk8s-serviceaccount + containers: + - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0 + imagePullPolicy: IfNotPresent + name: nginx-ingress-microk8s + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + securityContext: + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 # www-data + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + hostPort: 8002 + protocol: TCP + - name: https + containerPort: 443 + hostPort: 4432 + protocol: TCP + - name: health + containerPort: 10254 + hostPort: 12542 + protocol: TCP + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom2 + - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom2 + - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom2 + - --election-id=ingress-controller-leader-dom2 + - --controller-class=tfs.etsi.org/controller-class-dom2 + - --ingress-class=tfs-ingress-class-dom2 + - ' ' + - --publish-status-address=127.0.0.1 diff --git 
a/src/tests/scenario2/nginx-ingress-controller-dom3.yaml b/src/tests/scenario2/nginx-ingress-controller-dom3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..06eb6b75342e2b6340f6868404c82504da8e09ec --- /dev/null +++ b/src/tests/scenario2/nginx-ingress-controller-dom3.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-load-balancer-microk8s-conf-dom3 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-udp-microk8s-conf-dom3 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-tcp-microk8s-conf-dom3 + namespace: ingress +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: tfs-ingress-class-dom3 + annotations: + ingressclass.kubernetes.io/is-default-class: "false" +spec: + controller: tfs.etsi.org/controller-class-dom3 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nginx-ingress-microk8s-controller-dom3 + namespace: ingress + labels: + microk8s-application: nginx-ingress-microk8s-dom3 +spec: + selector: + matchLabels: + name: nginx-ingress-microk8s-dom3 + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + name: nginx-ingress-microk8s-dom3 + spec: + terminationGracePeriodSeconds: 60 + restartPolicy: Always + serviceAccountName: nginx-ingress-microk8s-serviceaccount + containers: + - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0 + imagePullPolicy: IfNotPresent + name: nginx-ingress-microk8s + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + securityContext: + capabilities: 
+ add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 # www-data + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + hostPort: 8003 + protocol: TCP + - name: https + containerPort: 443 + hostPort: 4433 + protocol: TCP + - name: health + containerPort: 10254 + hostPort: 12543 + protocol: TCP + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom3 + - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom3 + - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom3 + - --election-id=ingress-controller-leader-dom3 + - --controller-class=tfs.etsi.org/controller-class-dom3 + - --ingress-class=tfs-ingress-class-dom3 + - ' ' + - --publish-status-address=127.0.0.1 diff --git a/src/tests/scenario2/nginx-ingress-controller-dom4.yaml b/src/tests/scenario2/nginx-ingress-controller-dom4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c5c2e2f7004cd5ec8b5856b185c4c9de937a7d3f --- /dev/null +++ b/src/tests/scenario2/nginx-ingress-controller-dom4.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-load-balancer-microk8s-conf-dom4 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-udp-microk8s-conf-dom4 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-tcp-microk8s-conf-dom4 + namespace: ingress +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: tfs-ingress-class-dom4 + annotations: + ingressclass.kubernetes.io/is-default-class: "false" +spec: + controller: tfs.etsi.org/controller-class-dom4 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nginx-ingress-microk8s-controller-dom4 + namespace: ingress + labels: + 
microk8s-application: nginx-ingress-microk8s-dom4 +spec: + selector: + matchLabels: + name: nginx-ingress-microk8s-dom4 + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + name: nginx-ingress-microk8s-dom4 + spec: + terminationGracePeriodSeconds: 60 + restartPolicy: Always + serviceAccountName: nginx-ingress-microk8s-serviceaccount + containers: + - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0 + imagePullPolicy: IfNotPresent + name: nginx-ingress-microk8s + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + securityContext: + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 # www-data + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + hostPort: 8004 + protocol: TCP + - name: https + containerPort: 443 + hostPort: 4434 + protocol: TCP + - name: health + containerPort: 10254 + hostPort: 12544 + protocol: TCP + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom4 + - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom4 + - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom4 + - --election-id=ingress-controller-leader-dom4 + - --controller-class=tfs.etsi.org/controller-class-dom4 + - --ingress-class=tfs-ingress-class-dom4 + - ' ' + - --publish-status-address=127.0.0.1 diff --git a/src/tests/scenario2/reset.sh 
b/src/tests/scenario2/reset.sh new file mode 100755 index 0000000000000000000000000000000000000000..2bf2cd05559f632b960a5674ea59e334f5123a53 --- /dev/null +++ b/src/tests/scenario2/reset.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +kubectl --namespace tfs-dom1 scale --replicas=0 \ + deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \ + deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice + +kubectl --namespace tfs-dom1 scale --replicas=1 \ + deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \ + deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice diff --git a/src/tests/scenario2/show_deploy.sh b/src/tests/scenario2/show_deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..081b5d3f9430cc3f68b0c1abdf39f0b05eeefae5 --- /dev/null +++ b/src/tests/scenario2/show_deploy.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +echo "Deployment Resources:" +kubectl --namespace tfs-dom1 get all +printf "\n" + +echo "Deployment Ingress:" +kubectl --namespace tfs-dom1 get ingress +printf "\n" diff --git a/src/tests/scenario2/tfs-ingress-dom1.yaml b/src/tests/scenario2/tfs-ingress-dom1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bf2e40352d5acd85fcf9ee446df1a312a40556d6 --- /dev/null +++ b/src/tests/scenario2/tfs-ingress-dom1.yaml @@ -0,0 +1,39 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-dom1 + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: tfs-ingress-class-dom1 + rules: + - http: + paths: + - path: /webui(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 8004 + - path: /grafana(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 3000 + - path: /context(/|$)(.*) + pathType: Prefix + backend: + service: + name: contextservice + port: + number: 8080 + - path: /()(restconf/.*) + pathType: Prefix + backend: + service: + name: computeservice + port: + number: 8080 diff --git a/src/tests/scenario2/tfs-ingress-dom2.yaml b/src/tests/scenario2/tfs-ingress-dom2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..40d9480d75dfad817bb1ffe2052a9a71dbb7322d --- /dev/null +++ b/src/tests/scenario2/tfs-ingress-dom2.yaml @@ -0,0 +1,39 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-dom2 + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: tfs-ingress-class-dom2 + rules: + - http: + paths: + - path: /webui(/|$)(.*) + pathType: Prefix + backend: + service: + 
name: webuiservice + port: + number: 8004 + - path: /grafana(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 3000 + - path: /context(/|$)(.*) + pathType: Prefix + backend: + service: + name: contextservice + port: + number: 8080 + - path: /()(restconf/.*) + pathType: Prefix + backend: + service: + name: computeservice + port: + number: 8080 diff --git a/src/tests/scenario2/tfs-ingress-dom3.yaml b/src/tests/scenario2/tfs-ingress-dom3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..28668b424aa8bd957e12e53583317f336e3b0640 --- /dev/null +++ b/src/tests/scenario2/tfs-ingress-dom3.yaml @@ -0,0 +1,39 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-dom3 + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: tfs-ingress-class-dom3 + rules: + - http: + paths: + - path: /webui(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 8004 + - path: /grafana(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 3000 + - path: /context(/|$)(.*) + pathType: Prefix + backend: + service: + name: contextservice + port: + number: 8080 + - path: /()(restconf/.*) + pathType: Prefix + backend: + service: + name: computeservice + port: + number: 8080 diff --git a/src/tests/scenario2/tfs-ingress-dom4.yaml b/src/tests/scenario2/tfs-ingress-dom4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3774c327ca9ff6d46d538c7a2530a744187b957d --- /dev/null +++ b/src/tests/scenario2/tfs-ingress-dom4.yaml @@ -0,0 +1,39 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-dom4 + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: tfs-ingress-class-dom4 + rules: + - http: + paths: + - path: /webui(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 8004 + - path: 
/grafana(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 3000 + - path: /context(/|$)(.*) + pathType: Prefix + backend: + service: + name: contextservice + port: + number: 8080 + - path: /()(restconf/.*) + pathType: Prefix + backend: + service: + name: computeservice + port: + number: 8080 diff --git a/src/tests/tools/__init__.py b/src/tests/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/tools/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/mock_osm/Constants.py b/src/tests/tools/mock_osm/Constants.py new file mode 100644 index 0000000000000000000000000000000000000000..44d74169f0fd68073ca4ed5272f3dc7ef3ebf958 --- /dev/null +++ b/src/tests/tools/mock_osm/Constants.py @@ -0,0 +1,16 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +WIM_USERNAME = 'admin' +WIM_PASSWORD = 'admin' diff --git a/src/compute/tests/mock_osm/MockOSM.py b/src/tests/tools/mock_osm/MockOSM.py similarity index 100% rename from src/compute/tests/mock_osm/MockOSM.py rename to src/tests/tools/mock_osm/MockOSM.py diff --git a/src/tests/tools/mock_osm/Tools.py b/src/tests/tools/mock_osm/Tools.py new file mode 100644 index 0000000000000000000000000000000000000000..25a8b6111443424e8bfd2b35501b96a9a762325f --- /dev/null +++ b/src/tests/tools/mock_osm/Tools.py @@ -0,0 +1,48 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Dict, Optional + +def compose_service_endpoint_id(site_id : str, endpoint_id : Dict): + device_uuid = endpoint_id['device_id']['device_uuid']['uuid'] + endpoint_uuid = endpoint_id['endpoint_uuid']['uuid'] + return ':'.join([site_id, device_uuid, endpoint_uuid]) + +def wim_mapping(site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, priority=None, redundant=[]): + ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid'] + ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid'] + service_endpoint_id = compose_service_endpoint_id(site_id, ce_endpoint_id) + if pe_device_id is None: + bearer = '{:s}:{:s}'.format(ce_device_uuid, ce_endpoint_uuid) + else: + pe_device_uuid = pe_device_id['device_uuid']['uuid'] + bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid) + mapping = { + 'service_endpoint_id': service_endpoint_id, + 'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid, + 'service_mapping_info': { + 'site-id': site_id, + 'bearer': {'bearer-reference': bearer}, + } + } + if priority is not None: mapping['service_mapping_info']['priority'] = priority + if len(redundant) > 0: mapping['service_mapping_info']['redundant'] = redundant + return service_endpoint_id, mapping + +def connection_point(service_endpoint_id : str, encapsulation_type : str, vlan_id : int): + return { + 'service_endpoint_id': service_endpoint_id, + 'service_endpoint_encapsulation_type': encapsulation_type, + 'service_endpoint_encapsulation_info': {'vlan': vlan_id} + } diff --git a/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py b/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py similarity index 100% rename from src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py rename to src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py diff --git a/src/tests/tools/mock_osm/__init__.py b/src/tests/tools/mock_osm/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/tools/mock_osm/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/compute/tests/mock_osm/acknowledgements.txt b/src/tests/tools/mock_osm/acknowledgements.txt similarity index 100% rename from src/compute/tests/mock_osm/acknowledgements.txt rename to src/tests/tools/mock_osm/acknowledgements.txt diff --git a/src/compute/tests/mock_osm/sdnconn.py b/src/tests/tools/mock_osm/sdnconn.py similarity index 100% rename from src/compute/tests/mock_osm/sdnconn.py rename to src/tests/tools/mock_osm/sdnconn.py diff --git a/src/tests/tools/mock_sdn_ctrl/MockMWSdnCtrl.py b/src/tests/tools/mock_sdn_ctrl/MockMWSdnCtrl.py new file mode 100644 index 0000000000000000000000000000000000000000..61eec6fe674a0348a92cb84af439c6fbf8668f28 --- /dev/null +++ b/src/tests/tools/mock_sdn_ctrl/MockMWSdnCtrl.py @@ -0,0 +1,130 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Mock MicroWave SDN controller +# ----------------------------- +# REST server implementing minimal support for: +# - IETF YANG data model for Network Topology +# Ref: https://www.rfc-editor.org/rfc/rfc8345.html +# - IETF YANG data model for Transport Network Client Signals +# Ref: https://www.ietf.org/archive/id/draft-ietf-ccamp-client-signal-yang-07.html + + +# Ref: https://blog.miguelgrinberg.com/post/running-your-flask-application-over-https +# Ref: https://blog.miguelgrinberg.com/post/designing-a-restful-api-using-flask-restful + +import functools, logging, sys, time +from flask import Flask, abort, request +from flask.json import jsonify +from flask_restful import Api, Resource + +BIND_ADDRESS = '0.0.0.0' +BIND_PORT = 8443 +BASE_URL = '/nmswebs/restconf/data' +STR_ENDPOINT = 'https://{:s}:{:s}{:s}'.format(str(BIND_ADDRESS), str(BIND_PORT), str(BASE_URL)) +LOG_LEVEL = logging.DEBUG + +NETWORK_NODES = [ + {'node-id': '172.18.0.1', 'ietf-network-topology:termination-point': [ + {'tp-id': '172.18.0.1:1', 'ietf-te-topology:te': {'name': 'ethernet'}}, + {'tp-id': '172.18.0.1:2', 'ietf-te-topology:te': {'name': 'antena' }}, + ]}, + {'node-id': '172.18.0.2', 'ietf-network-topology:termination-point': [ + {'tp-id': '172.18.0.2:1', 'ietf-te-topology:te': {'name': 'ethernet'}}, + {'tp-id': '172.18.0.2:2', 'ietf-te-topology:te': {'name': 'antena' }}, + ]}, + {'node-id': '172.18.0.3', 'ietf-network-topology:termination-point': [ + {'tp-id': '172.18.0.3:1', 'ietf-te-topology:te': {'name': 'ethernet'}}, + {'tp-id': '172.18.0.3:2', 'ietf-te-topology:te': {'name': 
'antena' }}, + ]}, + {'node-id': '172.18.0.4', 'ietf-network-topology:termination-point': [ + {'tp-id': '172.18.0.4:1', 'ietf-te-topology:te': {'name': 'ethernet'}}, + {'tp-id': '172.18.0.4:2', 'ietf-te-topology:te': {'name': 'antena' }}, + ]} +] +NETWORK_LINKS = [ + { + 'source' : {'source-node': '172.18.0.1', 'source-tp': '172.18.0.1:2'}, + 'destination': {'dest-node' : '172.18.0.2', 'dest-tp' : '172.18.0.2:2'}, + }, + { + 'source' : {'source-node': '172.18.0.3', 'source-tp': '172.18.0.3:2'}, + 'destination': {'dest-node' : '172.18.0.4', 'dest-tp' : '172.18.0.4:2'}, + } +] +NETWORK_SERVICES = {} + + +logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") +LOGGER = logging.getLogger(__name__) + +logging.getLogger('werkzeug').setLevel(logging.WARNING) + +def log_request(logger : logging.Logger, response): + timestamp = time.strftime('[%Y-%b-%d %H:%M]') + logger.info('%s %s %s %s %s', timestamp, request.remote_addr, request.method, request.full_path, response.status) + return response + +class Health(Resource): + def get(self): return jsonify({}) + +class Network(Resource): + def get(self, network_uuid : str): + if network_uuid != 'SIAE-ETH-TOPOLOGY': abort(400) + network = {'node': NETWORK_NODES, 'ietf-network-topology:link': NETWORK_LINKS} + return jsonify({'ietf-network:network': network}) + +class Services(Resource): + def get(self): + services = [service for service in NETWORK_SERVICES.values()] + return jsonify({'ietf-eth-tran-service:etht-svc': {'etht-svc-instances': services}}) + + def post(self): + json_request = request.json + if not json_request: abort(400) + if not isinstance(json_request, dict): abort(400) + if 'etht-svc-instances' not in json_request: abort(400) + json_services = json_request['etht-svc-instances'] + if not isinstance(json_services, list): abort(400) + if len(json_services) != 1: abort(400) + svc_data = json_services[0] + etht_svc_name = svc_data['etht-svc-name'] + NETWORK_SERVICES[etht_svc_name] 
= svc_data + return jsonify({}), 201 + +class DelServices(Resource): + def delete(self, service_uuid : str): + NETWORK_SERVICES.pop(service_uuid, None) + return jsonify({}), 204 + +def main(): + LOGGER.info('Starting...') + + app = Flask(__name__) + app.after_request(functools.partial(log_request, LOGGER)) + + api = Api(app, prefix=BASE_URL) + api.add_resource(Health, '/ietf-network:networks') + api.add_resource(Network, '/ietf-network:networks/network=<string:network_uuid>') + api.add_resource(Services, '/ietf-eth-tran-service:etht-svc') + api.add_resource(DelServices, '/ietf-eth-tran-service:etht-svc/etht-svc-instances=<string:service_uuid>') + + LOGGER.info('Listening on {:s}...'.format(str(STR_ENDPOINT))) + app.run(debug=True, host=BIND_ADDRESS, port=BIND_PORT, ssl_context='adhoc') + + LOGGER.info('Bye') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/tests/tools/mock_sdn_ctrl/README.md b/src/tests/tools/mock_sdn_ctrl/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d8a6fe6b279553e54f13792cbf12f15b2b380dc2 --- /dev/null +++ b/src/tests/tools/mock_sdn_ctrl/README.md @@ -0,0 +1,53 @@ +# Mock MicroWave SDN Controller + +This REST server implements very basic support for the following YANG data models: +- IETF YANG data model for Network Topology + - Ref: https://www.rfc-editor.org/rfc/rfc8345.html +- IETF YANG data model for Transport Network Client Signals + - Ref: https://www.ietf.org/archive/id/draft-ietf-ccamp-client-signal-yang-07.html + +The aim of this server is to enable testing the MicroWaveDeviceDriver and the MicroWaveServiceHandler. +Follow the steps below to perform the test: + +## 1. Deploy TeraFlowSDN controller and the scenario +Deploy the test scenario "microwave_deploy.sh": +```bash +source src/tests/tools/mock_sdn_ctrl/microwave_deploy.sh +./deploy.sh +``` + +## 2. 
Install requirements and run the Mock MicroWave SDN controller +__NOTE__: if you run the Mock MicroWave SDN controller from the PyEnv used for developing on the TeraFlowSDN framework, +all the requirements are already in place. Install them only if you execute it in a separate/standalone environment. + +Install the required dependencies as follows: +```bash +pip install Flask==2.1.3 Flask-RESTful==0.3.9 +``` + +Run the Mock MicroWave SDN Controller as follows: +```bash +python src/tests/tools/mock_sdn_ctrl/MockMWSdnCtrl.py +``` + +## 3. Deploy the test descriptors +Edit the descriptors to meet your environment specifications. +Edit "network_descriptors.json" and change IP address and port of the MicroWave SDN controller of the "MW" device. +- Set value of config rule "_connect/address" to the address of the host where the Mock MicroWave SDN controller is + running (default="192.168.1.1"). +- Set value of config rule "_connect/port" to the port where your Mock MicroWave SDN controller is listening on + (default="8443"). + +Upload the "network_descriptors.json" through the TeraFlowSDN WebUI. +- If not already selected, select context "admin". +- Check that a network topology with 4 routers + 1 microwave radio system is loaded. They should form 2 rings. + +Upload the "service_descriptor.json" through the TeraFlowSDN WebUI. +- Check that 2 services have been created. +- The "mw-svc" should have a connection and be supported by a sub-service. +- The sub-service should also have a connection. +- The R1, R3, and MW devices should have configuration rules established. + +## 4. Delete the microwave service +Find the "mw-svc" on the WebUI, navigate to its details, and delete the service pressing the "Delete Service" button. +The service, sub-service, and device configuration rules should be removed. 
diff --git a/src/tests/tools/mock_sdn_ctrl/microwave_deploy.sh b/src/tests/tools/mock_sdn_ctrl/microwave_deploy.sh new file mode 100644 index 0000000000000000000000000000000000000000..2da884899c84491e36d87b87c502d2773cdf7f69 --- /dev/null +++ b/src/tests/tools/mock_sdn_ctrl/microwave_deploy.sh @@ -0,0 +1,22 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +# Supported components are: +# context device automation policy service compute monitoring webui +# interdomain slice pathcomp dlt +# dbscanserving opticalattackmitigator opticalattackdetector +# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector +export TFS_COMPONENTS="context device pathcomp service slice webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. 
+export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" diff --git a/src/tests/tools/mock_sdn_ctrl/network_descriptors.json b/src/tests/tools/mock_sdn_ctrl/network_descriptors.json new file mode 100644 index 0000000000000000000000000000000000000000..25fa940a49c07fb6708b6e6f303d0661ef1268c7 --- /dev/null +++ b/src/tests/tools/mock_sdn_ctrl/network_descriptors.json @@ -0,0 +1,117 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [], + "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}, + "device_ids": [], + "link_ids": [] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "R1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_operational_status": 2, "device_endpoints": [], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"type": "copper", "uuid": "MW", "sample_types": []}, + {"type": "copper", "uuid": "R2", "sample_types": []}, + {"type": "copper", "uuid": "EXT", "sample_types": []} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_operational_status": 2, "device_endpoints": [], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": 
"_connect/settings", "resource_value": {"endpoints": [ + {"type": "copper", "uuid": "MW", "sample_types": []}, + {"type": "copper", "uuid": "R1", "sample_types": []}, + {"type": "copper", "uuid": "EXT", "sample_types": []} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_operational_status": 2, "device_endpoints": [], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"type": "copper", "uuid": "MW", "sample_types": []}, + {"type": "copper", "uuid": "R4", "sample_types": []}, + {"type": "copper", "uuid": "EXT", "sample_types": []} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_operational_status": 2, "device_endpoints": [], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"type": "copper", "uuid": "MW", "sample_types": []}, + {"type": "copper", "uuid": "R3", "sample_types": []}, + {"type": "copper", "uuid": "EXT", "sample_types": []} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "MW"}}, "device_type": "microwave-radio-system", "device_drivers": [4], + "device_operational_status": 2, "device_endpoints": [], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "192.168.1.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8443"}}, + {"action": 1, 
"custom": {"resource_key": "_connect/settings", "resource_value": {"timeout": 120}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R1/R2==R2/R1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "R2"}}, + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "R1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3/R4==R4/R3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "R4"}}, + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "R3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1/MW==MW/172.18.0.1:1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "MW"}}, + {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "172.18.0.1:1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2/MW==MW/172.18.0.2:1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2"}}, "endpoint_uuid": {"uuid": "MW"}}, + {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "172.18.0.2:1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3/MW==MW/172.18.0.3:1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "MW"}}, + {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "172.18.0.3:1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4/MW==MW/172.18.0.4:1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4"}}, "endpoint_uuid": {"uuid": "MW"}}, + {"device_id": {"device_uuid": {"uuid": "MW"}}, "endpoint_uuid": {"uuid": "172.18.0.4:1"}} + ] + } + ] +} \ No newline at end of file diff --git a/src/tests/tools/mock_sdn_ctrl/service_descriptor.json b/src/tests/tools/mock_sdn_ctrl/service_descriptor.json new file mode 100644 index 0000000000000000000000000000000000000000..a4109bc7b18d2855f97f5bb329d4354a04b31607 --- 
/dev/null +++ b/src/tests/tools/mock_sdn_ctrl/service_descriptor.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "mw-svc"} + }, + "service_type": 2, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1"}}, "endpoint_uuid": {"uuid": "EXT"}}, + {"device_id": {"device_uuid": {"uuid": "R3"}}, "endpoint_uuid": {"uuid": "EXT"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "10.0"}}, + {"custom": {"constraint_type": "latency[ms]", "constraint_value": "15.2"}} + ], + "service_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": { + "vlan_id": 121 + }}} + ]} + } + ] +} diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index 75e1036420d0bc88a790fb7b65f4f4900abaaadd..d60cca6597ced52db8e320f3ba1beb2b032be65b 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -19,10 +19,10 @@ from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient def get_working_context() -> str: - if 'context_uuid' in session: - return session['context_uuid'] - else: - return 'Not selected' + return session['context_uuid'] if 'context_uuid' in session else '---' + +def get_working_topology() -> str: + return session['topology_uuid'] if 'topology_uuid' in session else '---' def liveness(): pass @@ -85,6 +85,7 @@ def create_app(use_config=None, web_app_root=None): app.jinja_env.filters['from_json'] = from_json app.jinja_env.globals.update(get_working_context=get_working_context) + app.jinja_env.globals.update(get_working_topology=get_working_topology) if web_app_root is not None: app.wsgi_app = SetSubAppMiddleware(app.wsgi_app, web_app_root) diff --git a/src/webui/service/__main__.py b/src/webui/service/__main__.py index 
c194be4bcfe71f3665dba75a109aa5fdf9646a8d..ddbda9c511eac4554c168128b3318b3107d892d7 100644 --- a/src/webui/service/__main__.py +++ b/src/webui/service/__main__.py @@ -12,15 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os, sys, logging +import hashlib, sys, logging from prometheus_client import start_http_server from common.Constants import ServiceNameEnum from common.Settings import ( - ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, get_service_baseurl_http, - get_service_port_http, get_setting, wait_for_environment_variables) + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, + get_service_baseurl_http, get_service_port_http, get_setting, wait_for_environment_variables) from webui.service import create_app from webui.Config import MAX_CONTENT_LENGTH, HOST, SECRET_KEY, DEBUG +def create_unique_session_cookie_name() -> str: + hostname = get_setting('HOSTNAME') + if hostname is None: return 'session' + hasher = hashlib.blake2b(digest_size=8) + hasher.update(hostname.encode('UTF-8')) + return 'session:{:s}'.format(str(hasher.hexdigest())) + def main(): log_level = get_log_level() logging.basicConfig(level=log_level) @@ -33,6 +40,8 @@ def main(): get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_HOST ), get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), ]) logger.info('Starting...') @@ -49,6 +58,7 @@ def main(): app = create_app(use_config={ 'SECRET_KEY': SECRET_KEY, 'MAX_CONTENT_LENGTH': MAX_CONTENT_LENGTH, + 'SESSION_COOKIE_NAME': create_unique_session_cookie_name(), }, web_app_root=web_app_root) app.run(host=host, 
port=service_port, debug=debug) diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py index f1423e92ed63fa778448978167c1c8e646414885..b57c5735d4b26c541d60a885512fe37a2fd626bc 100644 --- a/src/webui/service/device/routes.py +++ b/src/webui/service/device/routes.py @@ -16,7 +16,9 @@ from flask import current_app, render_template, Blueprint, flash, session, redir from common.proto.context_pb2 import ( ConfigActionEnum, ConfigRule, Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum, - Empty) + Empty, TopologyId) +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from webui.service.device.forms import AddDeviceForm @@ -27,16 +29,28 @@ device_client = DeviceClient() @device.get('/') def home(): - context_uuid = session.get('context_uuid', '-') - if context_uuid == "-": + if 'context_topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) + + context_uuid = session['context_uuid'] + topology_uuid = session['topology_uuid'] + context_client.connect() - response: DeviceList = context_client.ListDevices(Empty()) + json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)) + grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id)) + topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids} + grpc_devices: DeviceList = context_client.ListDevices(Empty()) context_client.close() - return render_template('device/home.html', devices=response.devices, - dde=DeviceDriverEnum, - dose=DeviceOperationalStatusEnum) + + devices = [ + device for device in grpc_devices.devices + if device.device_id.device_uuid.uuid in topo_device_uuids + ] + + return render_template( + 'device/home.html', devices=devices, 
dde=DeviceDriverEnum, + dose=DeviceOperationalStatusEnum) @device.route('add', methods=['GET', 'POST']) def add(): diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py index 51e903d9ec28c5aaac20cd49e2f97dd7044e12bf..5b8831b7732443830a6f9b1ef8f7da92b4c41cc0 100644 --- a/src/webui/service/link/routes.py +++ b/src/webui/service/link/routes.py @@ -14,7 +14,9 @@ from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for -from common.proto.context_pb2 import Empty, Link, LinkEvent, LinkId, LinkIdList, LinkList, DeviceId +from common.proto.context_pb2 import Empty, Link, LinkEvent, LinkId, LinkIdList, LinkList, DeviceId, TopologyId +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient @@ -23,18 +25,28 @@ context_client = ContextClient() @link.get('/') def home(): - context_uuid = session.get('context_uuid', '-') - if context_uuid == "-": + if 'context_topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) - request = Empty() + + context_uuid = session['context_uuid'] + topology_uuid = session['topology_uuid'] + context_client.connect() - response = context_client.ListLinks(request) + json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)) + grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id)) + topo_link_uuids = {link_id.link_uuid.uuid for link_id in grpc_topology.link_ids} + grpc_links: LinkList = context_client.ListLinks(Empty()) context_client.close() + + links = [ + link for link in grpc_links.links + if link.link_id.link_uuid.uuid in topo_link_uuids + ] + return render_template( - "link/home.html", - links=response.links, - ) + 'link/home.html', links=links) + @link.route('detail/<path:link_uuid>', methods=('GET', 'POST')) def detail(link_uuid: str): diff --git 
a/src/webui/service/main/forms.py b/src/webui/service/main/forms.py index abef11e06d6222c6bbab527f3a41ccdc5918480f..b138592fccd3f65831673912d04aba79f2dd3c72 100644 --- a/src/webui/service/main/forms.py +++ b/src/webui/service/main/forms.py @@ -19,20 +19,21 @@ from wtforms import SelectField, FileField, SubmitField from wtforms.validators import DataRequired, Length -class ContextForm(FlaskForm): - context = SelectField( 'Context', - choices=[], - validators=[ - DataRequired(), - Length(min=1) - ]) - +class ContextTopologyForm(FlaskForm): + context_topology = SelectField( + 'Ctx/Topo', + choices=[], + validators=[ + DataRequired(), + Length(min=1) + ]) submit = SubmitField('Submit') class DescriptorForm(FlaskForm): - descriptors = FileField('Descriptors', - validators=[ - FileAllowed(['json'], 'JSON Descriptors only!') - ]) + descriptors = FileField( + 'Descriptors', + validators=[ + FileAllowed(['json'], 'JSON Descriptors only!') + ]) submit = SubmitField('Submit') diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 9b1b088579c5b01218316bf1c96b5208ff854609..0e008734730867bca741d748c49e3b0589b40e48 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -12,18 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json, logging +import json, logging, re from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request -from common.proto.context_pb2 import Connection, Context, Device, Empty, Link, Service, Slice, Topology, ContextIdList +from common.proto.context_pb2 import Empty, ContextIdList, TopologyId, TopologyIdList +from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from service.client.ServiceClient import ServiceClient from slice.client.SliceClient import SliceClient -from webui.service.main.DescriptorTools import ( - format_custom_config_rules, get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices, - get_descriptors_add_topologies, split_devices_by_rules) -from webui.service.main.forms import ContextForm, DescriptorForm +from webui.service.main.forms import ContextTopologyForm, DescriptorForm main = Blueprint('main', __name__) @@ -34,38 +34,6 @@ slice_client = SliceClient() logger = logging.getLogger(__name__) -ENTITY_TO_TEXT = { - # name => singular, plural - 'context' : ('Context', 'Contexts' ), - 'topology' : ('Topology', 'Topologies' ), - 'device' : ('Device', 'Devices' ), - 'link' : ('Link', 'Links' ), - 'service' : ('Service', 'Services' ), - 'slice' : ('Slice', 'Slices' ), - 'connection': ('Connection', 'Connections'), -} - -ACTION_TO_TEXT = { - # action => infinitive, past - 'add' : ('Add', 'Added'), - 'update' : ('Update', 'Updated'), - 'config' : ('Configure', 'Configured'), -} - -def process_descriptor(entity_name, action_name, grpc_method, grpc_class, entities): - entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name] - action_infinitive, 
action_past = ACTION_TO_TEXT[action_name] - num_ok, num_err = 0, 0 - for entity in entities: - try: - grpc_method(grpc_class(**entity)) - num_ok += 1 - except Exception as e: # pylint: disable=broad-except - flash(f'Unable to {action_infinitive} {entity_name_singluar} {str(entity)}: {str(e)}', 'error') - num_err += 1 - if num_ok : flash(f'{str(num_ok)} {entity_name_plural} {action_past}', 'success') - if num_err: flash(f'{str(num_err)} {entity_name_plural} failed', 'danger') - def process_descriptors(descriptors): try: descriptors_file = request.files[descriptors.name] @@ -75,128 +43,89 @@ def process_descriptors(descriptors): flash(f'Unable to load descriptor file: {str(e)}', 'danger') return - dummy_mode = descriptors.get('dummy_mode' , False) - contexts = descriptors.get('contexts' , []) - topologies = descriptors.get('topologies' , []) - devices = descriptors.get('devices' , []) - links = descriptors.get('links' , []) - services = descriptors.get('services' , []) - slices = descriptors.get('slices' , []) - connections = descriptors.get('connections', []) - - # Format CustomConfigRules in Devices, Services and Slices provided in JSON format - for device in devices: - config_rules = device.get('device_config', {}).get('config_rules', []) - config_rules = format_custom_config_rules(config_rules) - device['device_config']['config_rules'] = config_rules - - for service in services: - config_rules = service.get('service_config', {}).get('config_rules', []) - config_rules = format_custom_config_rules(config_rules) - service['service_config']['config_rules'] = config_rules - - for slice in slices: - config_rules = slice.get('slice_config', {}).get('config_rules', []) - config_rules = format_custom_config_rules(config_rules) - slice['slice_config']['config_rules'] = config_rules - - - # Context and Topology require to create the entity first, and add devices, links, services, slices, etc. in a - # second stage. 
- contexts_add = get_descriptors_add_contexts(contexts) - topologies_add = get_descriptors_add_topologies(topologies) - - if dummy_mode: - # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks. - context_client.connect() - process_descriptor('context', 'add', context_client.SetContext, Context, contexts_add ) - process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies_add) - process_descriptor('device', 'add', context_client.SetDevice, Device, devices ) - process_descriptor('link', 'add', context_client.SetLink, Link, links ) - process_descriptor('service', 'add', context_client.SetService, Service, services ) - process_descriptor('slice', 'add', context_client.SetSlice, Slice, slices ) - process_descriptor('connection', 'add', context_client.SetConnection, Connection, connections ) - process_descriptor('context', 'update', context_client.SetContext, Context, contexts ) - process_descriptor('topology', 'update', context_client.SetTopology, Topology, topologies ) - context_client.close() - else: - # Normal mode: follows the automated workflows in the different components - assert len(connections) == 0, 'in normal mode, connections should not be set' - - # Device, Service and Slice require to first create the entity and the configure it - devices_add, devices_config = split_devices_by_rules(devices) - services_add = get_descriptors_add_services(services) - slices_add = get_descriptors_add_slices(slices) - - context_client.connect() - device_client.connect() - service_client.connect() - slice_client.connect() - - process_descriptor('context', 'add', context_client.SetContext, Context, contexts_add ) - process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies_add) - process_descriptor('device', 'add', device_client .AddDevice, Device, devices_add ) - process_descriptor('device', 'config', device_client .ConfigureDevice, Device, devices_config) - 
process_descriptor('link', 'add', context_client.SetLink, Link, links ) - process_descriptor('service', 'add', service_client.CreateService, Service, services_add ) - process_descriptor('service', 'update', service_client.UpdateService, Service, services ) - process_descriptor('slice', 'add', slice_client .CreateSlice, Slice, slices_add ) - process_descriptor('slice', 'update', slice_client .UpdateSlice, Slice, slices ) - process_descriptor('context', 'update', context_client.SetContext, Context, contexts ) - process_descriptor('topology', 'update', context_client.SetTopology, Topology, topologies ) - - slice_client.close() - service_client.close() - device_client.close() - context_client.close() + descriptor_loader = DescriptorLoader(descriptors) + results = descriptor_loader.process() + for message,level in compose_notifications(results): + flash(message, level) @main.route('/', methods=['GET', 'POST']) def home(): context_client.connect() device_client.connect() - response: ContextIdList = context_client.ListContextIds(Empty()) - context_form: ContextForm = ContextForm() - context_form.context.choices.append(('', 'Select...')) - - for context in response.context_ids: - context_form.context.choices.append((context.context_uuid.uuid, context.context_uuid)) - - if context_form.validate_on_submit(): - session['context_uuid'] = context_form.context.data - flash(f'The context was successfully set to `{context_form.context.data}`.', 'success') - return redirect(url_for("main.home")) - - if 'context_uuid' in session: - context_form.context.data = session['context_uuid'] + context_topology_form: ContextTopologyForm = ContextTopologyForm() + context_topology_form.context_topology.choices.append(('', 'Select...')) + + ctx_response: ContextIdList = context_client.ListContextIds(Empty()) + for context_id in ctx_response.context_ids: + context_uuid = context_id.context_uuid.uuid + topo_response: TopologyIdList = context_client.ListTopologyIds(context_id) + for topology_id in 
topo_response.topology_ids: + topology_uuid = topology_id.topology_uuid.uuid + context_topology_uuid = 'ctx[{:s}]/topo[{:s}]'.format(context_uuid, topology_uuid) + context_topology_name = 'Context({:s}):Topology({:s})'.format(context_uuid, topology_uuid) + context_topology_entry = (context_topology_uuid, context_topology_name) + context_topology_form.context_topology.choices.append(context_topology_entry) + + if context_topology_form.validate_on_submit(): + context_topology_uuid = context_topology_form.context_topology.data + if len(context_topology_uuid) > 0: + match = re.match('ctx\[([^\]]+)\]\/topo\[([^\]]+)\]', context_topology_uuid) + if match is not None: + session['context_topology_uuid'] = context_topology_uuid = match.group(0) + session['context_uuid'] = context_uuid = match.group(1) + session['topology_uuid'] = topology_uuid = match.group(2) + MSG = f'Context({context_uuid})/Topology({topology_uuid}) successfully selected.' + flash(MSG, 'success') + return redirect(url_for("main.home")) + + if 'context_topology_uuid' in session: + context_topology_form.context_topology.data = session['context_topology_uuid'] descriptor_form: DescriptorForm = DescriptorForm() try: if descriptor_form.validate_on_submit(): process_descriptors(descriptor_form.descriptors) return redirect(url_for("main.home")) - except Exception as e: + except Exception as e: # pylint: disable=broad-except logger.exception('Descriptor load failed') flash(f'Descriptor load failed: `{str(e)}`', 'danger') finally: context_client.close() device_client.close() - return render_template('main/home.html', context_form=context_form, descriptor_form=descriptor_form) + return render_template( + 'main/home.html', context_topology_form=context_topology_form, descriptor_form=descriptor_form) @main.route('/topology', methods=['GET']) def topology(): context_client.connect() try: + if 'context_topology_uuid' not in session: + return jsonify({'devices': [], 'links': []}) + + context_uuid = 
session['context_uuid'] + topology_uuid = session['topology_uuid'] + + json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)) + grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id)) + + topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids} + topo_link_uuids = {link_id .link_uuid .uuid for link_id in grpc_topology.link_ids } + response = context_client.ListDevices(Empty()) - devices = [{ - 'id': device.device_id.device_uuid.uuid, - 'name': device.device_id.device_uuid.uuid, - 'type': device.device_type, - } for device in response.devices] + devices = [] + for device in response.devices: + if device.device_id.device_uuid.uuid not in topo_device_uuids: continue + devices.append({ + 'id': device.device_id.device_uuid.uuid, + 'name': device.device_id.device_uuid.uuid, + 'type': device.device_type, + }) response = context_client.ListLinks(Empty()) links = [] for link in response.links: + if link.link_id.link_uuid.uuid not in topo_link_uuids: continue if len(link.link_endpoint_ids) != 2: str_link = grpc_message_to_json_string(link) logger.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link)) diff --git a/src/webui/service/static/topology_icons/Acknowledgements.txt b/src/webui/service/static/topology_icons/Acknowledgements.txt index ddf7a8d0de921219274135fe71fed683b078a4e7..b285d225957b0a4e8c14ac4ae5e078597d2a1b27 100644 --- a/src/webui/service/static/topology_icons/Acknowledgements.txt +++ b/src/webui/service/static/topology_icons/Acknowledgements.txt @@ -1,6 +1,7 @@ Network Topology Icons taken from https://vecta.io/symbols -https://symbols.getvecta.com/stencil_240/51_cloud.4d0a827676.png => cloud.png +https://symbols.getvecta.com/stencil_240/51_cloud.4d0a827676.png => network.png + #modified to be grey instead of white https://symbols.getvecta.com/stencil_240/15_atm-switch.1bbf9a7cca.png => packet-switch.png 
https://symbols.getvecta.com/stencil_241/45_atm-switch.6a7362c1df.png => emu-packet-switch.png @@ -11,6 +12,10 @@ https://symbols.getvecta.com/stencil_241/224_router.be30fb87e7.png => emu-packet https://symbols.getvecta.com/stencil_240/269_virtual-layer-switch.ed10fdede6.png => open-line-system.png https://symbols.getvecta.com/stencil_241/281_virtual-layer-switch.29420aff2f.png => emu-open-line-system.png +# Temporal icon; to be updated +https://symbols.getvecta.com/stencil_240/269_virtual-layer-switch.ed10fdede6.png => microwave-radio-system.png +https://symbols.getvecta.com/stencil_241/281_virtual-layer-switch.29420aff2f.png => emu-microwave-radio-system.png + https://symbols.getvecta.com/stencil_240/102_ibm-tower.2cc133f3d0.png => datacenter.png https://symbols.getvecta.com/stencil_241/133_ibm-tower.995c44696c.png => emu-datacenter.png diff --git a/src/webui/service/static/topology_icons/cloud.png b/src/webui/service/static/topology_icons/cloud.png deleted file mode 100644 index 0f8e9c9714edd1c11904367ef1e9c60ef7ed3295..0000000000000000000000000000000000000000 Binary files a/src/webui/service/static/topology_icons/cloud.png and /dev/null differ diff --git a/src/webui/service/static/topology_icons/emu-microwave-radio-system.png b/src/webui/service/static/topology_icons/emu-microwave-radio-system.png new file mode 100644 index 0000000000000000000000000000000000000000..a5c30d679170c6e080dee3cc5239bf7ecaefe743 Binary files /dev/null and b/src/webui/service/static/topology_icons/emu-microwave-radio-system.png differ diff --git a/src/webui/service/static/topology_icons/microwave-radio-system.png b/src/webui/service/static/topology_icons/microwave-radio-system.png new file mode 100644 index 0000000000000000000000000000000000000000..b51f094216755ed9fc5c7a7e8957bab88090c954 Binary files /dev/null and b/src/webui/service/static/topology_icons/microwave-radio-system.png differ diff --git a/src/webui/service/static/topology_icons/network.png 
b/src/webui/service/static/topology_icons/network.png new file mode 100644 index 0000000000000000000000000000000000000000..1f770f7bb2a31834a191e6c8727f059e1f14bbe1 Binary files /dev/null and b/src/webui/service/static/topology_icons/network.png differ diff --git a/src/webui/service/static/topology_icons/xr-constellation.png b/src/webui/service/static/topology_icons/xr-constellation.png new file mode 100644 index 0000000000000000000000000000000000000000..518ca5a60b1d6b9c674783873189566430adccf9 Binary files /dev/null and b/src/webui/service/static/topology_icons/xr-constellation.png differ diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html index 5d7801d11880e89869120985307c6b43416f5a05..bee98ee82da3482caf1fad930d03d30572ba287d 100644 --- a/src/webui/service/templates/base.html +++ b/src/webui/service/templates/base.html @@ -103,7 +103,7 @@ </li> </ul> <span class="navbar-text" style="color: #fff;"> - Current context: <b>{{ get_working_context() }}</b> + Current Context(<b>{{ get_working_context() }}</b>)/Topology(<b>{{ get_working_topology() }}</b>) </span> </div> </div> diff --git a/src/webui/service/templates/main/home.html b/src/webui/service/templates/main/home.html index db390939ff926b5bbfbfc6507b0f4e79695f3693..43b066cc0227801672fc25780f27e3a699338632 100644 --- a/src/webui/service/templates/main/home.html +++ b/src/webui/service/templates/main/home.html @@ -19,7 +19,7 @@ {% block content %} <h2>ETSI TeraFlowSDN Controller</h2> - {% for field, message in context_form.errors.items() %} + {% for field, message in context_topology_form.errors.items() %} <div class="alert alert-dismissible fade show" role="alert"> <b>{{ field }}</b>: {{ message }} <button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button> @@ -28,32 +28,32 @@ {% endfor %} <form id="select_context" method="POST" enctype="multipart/form-data"> - {{ context_form.hidden_tag() }} + {{ context_topology_form.hidden_tag() }} 
<fieldset class="form-group"> - <legend>Select the working context, or upload a JSON descriptors file</legend> + <legend>Select the desired Context/Topology</legend> <div class="row mb-3"> - {{ context_form.context.label(class="col-sm-1 col-form-label") }} + {{ context_topology_form.context_topology.label(class="col-sm-1 col-form-label") }} <div class="col-sm-5"> - {% if context_form.context.errors %} - {{ context_form.context(class="form-select is-invalid") }} + {% if context_topology_form.context_topology.errors %} + {{ context_topology_form.context_topology(class="form-select is-invalid") }} <div class="invalid-feedback"> - {% for error in context_form.context.errors %} + {% for error in context_topology_form.context_topology.errors %} <span>{{ error }}</span> {% endfor %} </div> {% else %} - {{ context_form.context(class="form-select") }} + {{ context_topology_form.context_topology(class="form-select") }} {% endif %} </div> <div class="col-sm-2"> - {{ context_form.submit(class='btn btn-primary') }} + {{ context_topology_form.submit(class='btn btn-primary') }} </div> </div> </fieldset> </form> - <form id="select_context" method="POST" enctype="multipart/form-data"> - {{ context_form.hidden_tag() }} + <form id="upload_descriptors" method="POST" enctype="multipart/form-data"> + {{ descriptor_form.hidden_tag() }} <fieldset class="form-group"> <legend>Upload a JSON descriptors file</legend> <div class="row mb-3"> diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index 975369ca21d294900c83537916bf527dce4810e9..e1f963e425e23216281068b82da23c809a677296 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -43,6 +43,7 @@ <div class="row mb-3"> <div class="col-sm-4"> + <b>Context: </b> {{ service.service_id.context_id.context_uuid.uuid }}<br><br> <b>UUID: </b> {{ service.service_id.service_uuid.uuid }}<br><br> <b>Type: </b> {{ 
ste.Name(service.service_type).replace('SERVICETYPE_', '') }}<br><br> <b>Status: </b> {{ sse.Name(service.service_status.service_status).replace('SERVICESTATUS_', '') }}<br><br> @@ -209,13 +210,17 @@ <ul> {% for sub_service_id in connection.sub_service_ids %} <li> + {% if sub_service_id.context_id.context_uuid.uuid == session['context_uuid'] %} <a href="{{ url_for('service.detail', service_uuid=sub_service_id.service_uuid.uuid) }}"> - {{ sub_service_id.service_uuid.uuid }} + {{ sub_service_id.context_id.context_uuid.uuid }} / {{ sub_service_id.service_uuid.uuid }} <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16"> <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/> <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/> </svg> </a> + {% else %} + {{ sub_service_id.context_id.context_uuid.uuid }} / {{ sub_service_id.service_uuid.uuid }} + {% endif %} </li> {% endfor %} </ul> diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html index 07734f32304b60365f76413d4689a37b66cc60a3..889e10ce53b4a019b55f714c2442f32f0c2b8e93 100644 --- a/src/webui/service/templates/slice/detail.html +++ b/src/webui/service/templates/slice/detail.html @@ -44,7 +44,9 @@ <div class="row mb-3"> <div class="col-sm-4"> + <b>Context: </b> {{ slice.slice_id.context_id.context_uuid.uuid }}<br><br> <b>UUID: </b> {{ slice.slice_id.slice_uuid.uuid }}<br><br> + <b>Owner: </b> {{ slice.slice_owner.owner_uuid.uuid }}<br><br> <b>Status: </b> {{ sse.Name(slice.slice_status.slice_status).replace('SLICESTATUS_', '') }}<br><br> </div> <div class="col-sm-8"> @@ -180,13 
+182,17 @@ {% for service_id in slice.slice_service_ids %} <tr> <td> + {% if service_id.context_id.context_uuid.uuid == session['context_uuid'] %} <a href="{{ url_for('service.detail', service_uuid=service_id.service_uuid.uuid) }}"> - {{ service_id.service_uuid.uuid }} + {{ service_id.context_id.context_uuid.uuid }} / {{ service_id.service_uuid.uuid }} <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16"> <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/> <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/> </svg> </a> + {% else %} + {{ service_id.context_id.context_uuid.uuid }} / {{ service_id.service_uuid.uuid }} + {% endif %} </td> </tr> {% endfor %} @@ -204,13 +210,17 @@ {% for subslice_id in slice.slice_subslice_ids %} <tr> <td> + {% if subslice_id.context_id.context_uuid.uuid == session['context_uuid'] %} <a href="{{ url_for('slice.detail', slice_uuid=subslice_id.slice_uuid.uuid) }}"> - {{ subslice_id.slice_uuid.uuid }} + {{ subslice_id.context_id.context_uuid.uuid }} / {{ subslice_id.slice_uuid.uuid }} <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16"> <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/> <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/> </svg> </a> + 
{% else %} + {{ subslice_id.context_id.context_uuid.uuid }} / {{ subslice_id.slice_uuid.uuid }} + {% endif %} </td> </tr> {% endfor %} diff --git a/tutorial/2-2-ofc22.md b/tutorial/2-2-ofc22.md index 3b55a0961da78fdc78a8feb31499608589b9d0be..04d585d24cc046e6a1aadc1c93118a1b36855aca 100644 --- a/tutorial/2-2-ofc22.md +++ b/tutorial/2-2-ofc22.md @@ -37,9 +37,6 @@ environment and a TeraFlowSDN controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the Python environment as described in [Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md). -Remember to source the scenario settings, e.g., `cd ~/tfs-ctrl && source ofc22/deploy_specs.sh` in each terminal you open. -Then, re-build the protocol buffers code from the proto files: -`./proto/generate_code_python.sh` ## 2.2.4. Access to the WebUI and Dashboard @@ -55,25 +52,33 @@ Notes: ## 2.2.5. Test execution -Before executing the tests, the environment variables need to be prepared. -First, make sure to load your deployment variables by: +Before executing the tests, we need to prepare a few things. + +First, you need to make sure that you have all the gRPC-generate code in your folder. +To do so, run: ``` -source my_deploy.sh +proto/generate_code_python.sh ``` -Then, you also need to load the environment variables to support the execution of the -tests by: +Then, it is time to deploy TeraFlowSDN with the correct specification for this scenario. +Make sure to load your deployment variables for this scenario by: ``` -source tfs_runtime_env_vars.sh +source ofc22/deploy_specs.sh ``` -You also need to make sure that you have all the gRPC-generate code in your folder. 
-To do so, run: +Then, you need to deploy the components by running: ``` -proto/generate_code_python.sh +./deploy.sh +``` + +After the deployment is finished, you need to load the environment variables to support +the execution of the tests by: + +``` +source tfs_runtime_env_vars.sh ``` To execute this functional test, four main steps needs to be carried out: @@ -90,8 +95,24 @@ See the troubleshooting section if needed. You can check the logs of the different components using the appropriate `scripts/show_logs_[component].sh` scripts after you execute each step. +There are two ways to execute the functional tests, *running all the tests with a single script* or *running each test independently*. +In the following we start with the first option, then we comment on how to run each test independently. + + +### 2.2.5.1. Running all tests with a single script + +We have a script that executes all the steps at once. +It is meant for being used to test if all components involved in this scenario are working correct. +To run all the functional tests, you can run: + +``` +ofc22/run_tests_and_coverage.sh +``` + +The following sections explain each one of the steps. -### 2.2.5.1. Device bootstrapping + +### 2.2.5.2. Device bootstrapping This step configures some basic entities (Context and Topology), the devices, and the links in the topology. @@ -103,7 +124,11 @@ The expected results are: To run this step, you can do it from the WebUI by uploading the file `./ofc22/tests/descriptors_emulated.json` that contains the descriptors of the contexts, topologies, devices, and links, or by -executing the `./ofc22/run_test_01_bootstrap.sh` script. +executing the script: + +``` +./ofc22/run_test_01_bootstrap.sh +``` When the bootstrapping finishes, check in the Grafana L3-Monitoring Dashboard and you should see the monitoring data being plotted and updated every 5 seconds (by default). 
@@ -117,12 +142,16 @@ Note here that the emulated devices produce synthetic randomly-generated monitor and do not represent any particularservices configured. -### 2.2.5.2. L3VPN Service creation +### 2.2.5.3. L3VPN Service creation This step configures a new service emulating the request an OSM WIM would make by means of a Mock OSM instance. -To run this step, execute the `./ofc22/run_test_02_create_service.sh` script. +To run this step, execute the script: + +``` +./ofc22/run_test_02_create_service.sh +``` When the script finishes, check the WebUI *Services* tab. You should see that two services have been created, one for the optical layer and another for the packet layer. @@ -133,13 +162,18 @@ the plots with the monitored data for the device. By default, device R1-EMU is selected. -### 2.2.5.3. L3VPN Service removal +### 2.2.5.4. L3VPN Service removal This step deconfigures the previously created services emulating the request an OSM WIM would make by means of a Mock OSM instance. -To run this step, execute the `./ofc22/run_test_03_delete_service.sh` script, or delete -the L3NM service from the WebUI. +To run this step, execute the script: + +``` +./ofc22/run_test_03_delete_service.sh +``` + +or delete the L3NM service from the WebUI. When the script finishes, check the WebUI *Services* tab. You should see that the two services have been removed. @@ -149,12 +183,16 @@ In the Grafana Dashboard, given that there is no service configured, you should 0-valued flat plot again. -### 2.2.5.4. Cleanup +### 2.2.5.5. Cleanup This last step performs a cleanup of the scenario removing all the TeraFlowSDN entities for completeness. -To run this step, execute the `./ofc22/run_test_04_cleanup.sh` script. +To run this step, execute the script: + +``` +./ofc22/run_test_04_cleanup.sh +``` When the script finishes, check the WebUI *Devices* tab, you should see that the devices have been removed.